Dataset schema (one row per module):

  column                 type     range / lengths
  ---------------------  -------  -------------------
  entry_point            string   1 – 65 chars
  original_triton_code   string   4.5k – 619k chars
  python_code            string   208 – 60.9k chars
  triton_code            string   1.15k – 275k chars
  repo_name              string   7 – 115 chars
  module_name            string   1 – 65 chars
  synthetic              bool     1 class
  uuid                   int64    0 – 18.5k
  licenses               list     1 – 6 entries
  stars                  int64    0 – 19.8k
  sha                    string   40 chars (fixed)
  repo_link              string   72 – 180 chars
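The rows below follow this schema. If the dataset is hosted on the Hugging Face Hub, a row can be pulled with the datasets library; the repo id below is a hypothetical placeholder, not the actual dataset name:

from datasets import load_dataset

# 'ORG/NAME' is a placeholder; substitute the real dataset repo id.
ds = load_dataset('ORG/NAME', split='train')
row = ds[0]
print(row['module_name'], row['repo_link'])
print(row['python_code'][:200])   # eager PyTorch reference implementation
print(row['triton_code'][:200])   # cleaned Inductor/Triton version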
entry_point: LayerNorm
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/hv/chvc25wy6j3j5a3u4ftzbjunfad2snt7mihkwdcp6ilzxzipobhu.py
# Topologically Sorted Source Nodes: [sub, add, truediv, mul, add_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   mul => mul
#   sub => sub
#   truediv => div
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %expand), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 0.0001), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand_2), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {})
triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = (xindex // 16)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tmp11 = tmp1 - tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp2 - tmp9
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp16 = tmp4 - tmp9
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp9
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = 3.0
    tmp23 = tmp21 / tmp22
    tmp24 = libdevice.sqrt(tmp23)
    tmp25 = 0.0001
    tmp26 = tmp24 + tmp25
    tmp27 = tmp10 / tmp26
    tmp29 = tmp27 * tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + (x3), tmp31, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [sub, add, truediv, mul, add_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_div_mul_sub_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
        del primals_2
        del primals_3
    return (buf0, primals_1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
from torch.nn.modules.normalization import LayerNorm
from torch.optim.lr_scheduler import *


class LayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=0.0001):
        super(LayerNorm, self).__init__()
        self.alpha = Parameter(torch.ones(1, 1, hidden_size))
        self.beta = Parameter(torch.zeros(1, 1, hidden_size))
        self.eps = eps

    def forward(self, x):
        """
        Args:
            :param x: batch * len * input_size

        Returns:
            normalized x
        """
        mu = torch.mean(x, 2, keepdim=True).expand_as(x)
        sigma = torch.std(x, 2, keepdim=True).expand_as(x)
        return (x - mu) / (sigma + self.eps) * self.alpha.expand_as(x) + self.beta.expand_as(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
from torch.optim.lr_scheduler import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tmp11 = tmp1 - tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp2 - tmp9
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp16 = tmp4 - tmp9
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp9
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = 3.0
    tmp23 = tmp21 / tmp22
    tmp24 = libdevice.sqrt(tmp23)
    tmp25 = 0.0001
    tmp26 = tmp24 + tmp25
    tmp27 = tmp10 / tmp26
    tmp29 = tmp27 * tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x3, tmp31, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class LayerNormNew(nn.Module):

    def __init__(self, hidden_size, eps=0.0001):
        super(LayerNormNew, self).__init__()
        self.alpha = Parameter(torch.ones(1, 1, hidden_size))
        self.beta = Parameter(torch.zeros(1, 1, hidden_size))
        self.eps = eps

    def forward(self, input_0):
        primals_2 = self.alpha
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
repo_name: chunhuililili/mt_dnn
module_name: LayerNorm
synthetic: false
uuid: 10,192
licenses: [ "MIT" ]
stars: 0
sha: 4c6efaf21724c7b8103a05e46b5b44d7b246225e
repo_link: https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
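Each row pairs an eager module (python_code) with its Inductor-generated Triton port (triton_code), so a natural sanity check is to run both on the row's get_inputs() and compare. A minimal sketch for this row, assuming a CUDA device and the LayerNorm / LayerNormNew / get_inputs definitions above (the tolerances are illustrative, not part of the dataset):

import torch

ref = LayerNorm(hidden_size=4).cuda()     # eager module from python_code
new = LayerNormNew(hidden_size=4).cuda()  # Triton-backed module from triton_code
new.load_state_dict(ref.state_dict())     # share alpha/beta so outputs are comparable

x = get_inputs()[0].cuda()
torch.testing.assert_close(new(x), ref(x), rtol=1e-4, atol=1e-4)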
entry_point: GCN
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ay/cay3542vhmin5gvntsp37i63dfwj3bpzz2hr5fa2ukw6ibl57qp3.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.sigmoid]
# Source node to ATen node mapping:
#   add => add
#   x => sigmoid
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {})
#   %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
triton_poi_fused_add_sigmoid_0 = async_compile.triton('triton_poi_fused_add_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.sigmoid]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_sigmoid_0.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
        del primals_4
        buf3 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm]
        extern_kernels.mm(buf2, primals_5, out=buf3)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm]
        extern_kernels.mm(primals_3, buf3, out=buf4)
        del buf3
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [x_1, sigmoid_1], Original ATen: [aten.add, aten.sigmoid]
        triton_poi_fused_add_sigmoid_0.run(buf5, primals_6, 16, grid=grid(16), stream=stream0)
        del primals_6
    return (buf5, buf2, buf5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter


class GraphConvolution(nn.Module):

    def __init__(self, in_features, out_features):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.bias = Parameter(torch.FloatTensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        return output + self.bias


class GCN(nn.Module):

    def __init__(self, nfeat, nhid, nout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nout)

    def forward(self, x, adj):
        x = torch.sigmoid(self.gc1(x, adj))
        x = self.gc2(x, adj)
        return torch.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'nfeat': 4, 'nhid': 4, 'nout': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
from torch.nn.parameter import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_sigmoid_0[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf3 = buf0
        del buf0
        extern_kernels.mm(buf2, primals_5, out=buf3)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf3, out=buf4)
        del buf3
        buf5 = buf4
        del buf4
        triton_poi_fused_add_sigmoid_0[grid(16)](buf5, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_6
    return buf5, buf2, buf5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)


class GraphConvolution(nn.Module):

    def __init__(self, in_features, out_features):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.bias = Parameter(torch.FloatTensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        return output + self.bias


class GCNNew(nn.Module):

    def __init__(self, nfeat, nhid, nout):
        super(GCNNew, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nout)

    def forward(self, input_0, input_1):
        # Bind arguments in the order the traced call() consumes them:
        # primals_2 = features, primals_3 = adjacency, primals_1/primals_4 =
        # gc1 weight/bias, primals_5/primals_6 = gc2 weight/bias.
        primals_1 = self.gc1.weight
        primals_4 = self.gc1.bias
        primals_5 = self.gc2.weight
        primals_6 = self.gc2.bias
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
        return output[0]
repo_name: iDMG-dynamicGCN/DatasetCollection
module_name: GCN
synthetic: false
uuid: 10,193
licenses: [ "MIT" ]
stars: 0
sha: ad761b38bc86af1dd3aee6c72e819d6f00252164
repo_link: https://github.com/iDMG-dynamicGCN/DatasetCollection/tree/ad761b38bc86af1dd3aee6c72e819d6f00252164
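The same kind of parity check works for this row; the GCN module just takes two inputs (features and adjacency), both produced by get_inputs. A sketch under the same assumptions (CUDA device, the GCN / GCNNew / get_inputs definitions above):

import torch

ref = GCN(nfeat=4, nhid=4, nout=4).cuda()
new = GCNNew(nfeat=4, nhid=4, nout=4).cuda()
new.load_state_dict(ref.state_dict())  # gc1/gc2 weights and biases line up by name

x, adj = [t.cuda() for t in get_inputs()]
torch.testing.assert_close(new(x, adj), ref(x, adj), rtol=1e-4, atol=1e-4)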
entry_point: TorchLogCosh
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ab/cabgownau5iwqsxnjptlkqo7tfs7f7whrulbkdvoxw7hd2jxe7p5.py
# Topologically Sorted Source Nodes: [neg, mul, softplus, add], Original ATen: [aten.neg, aten.mul, aten.softplus, aten.add]
# Source node to ATen node mapping:
#   add => add
#   mul => mul
#   neg => neg
#   softplus => exp, gt, log1p, where
# Graph fragment:
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
#   %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %log1p), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %where), kwargs = {})
triton_poi_fused_add_mul_neg_softplus_0 = async_compile.triton('triton_poi_fused_add_mul_neg_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_neg_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_neg_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = -tmp0
    tmp2 = 2.0
    tmp3 = tmp0 * tmp2
    tmp4 = 20.0
    tmp5 = tmp3 > tmp4
    tmp6 = tl_math.exp(tmp3)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tl.where(tmp5, tmp3, tmp7)
    tmp9 = tmp1 + tmp8
    tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [neg, mul, softplus, add], Original ATen: [aten.neg, aten.mul, aten.softplus, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_neg_softplus_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch as _torch


class TorchLogCosh(_torch.nn.Module):
    """
    Log(cosh) activation function for PyTorch modules
    """

    def __init__(self):
        """
        Init method.
        """
        super().__init__()

    def forward(self, input):
        """
        Forward pass of the function.
        """
        return -input + _torch.nn.functional.softplus(2.0 * input)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch as _torch

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_neg_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -tmp0
    tmp2 = 2.0
    tmp3 = tmp0 * tmp2
    tmp4 = 20.0
    tmp5 = tmp3 > tmp4
    tmp6 = tl_math.exp(tmp3)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tl.where(tmp5, tmp3, tmp7)
    tmp9 = tmp1 + tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_neg_softplus_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TorchLogCoshNew(_torch.nn.Module):
    """
    Log(cosh) activation function for PyTorch modules
    """

    def __init__(self):
        """
        Init method.
        """
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: inailuig/netket
module_name: TorchLogCosh
synthetic: false
uuid: 10,194
licenses: [ "Apache-2.0" ]
stars: 0
sha: ab57a6fb019edb9ac298969950724781f2ae2b22
repo_link: https://github.com/inailuig/netket/tree/ab57a6fb019edb9ac298969950724781f2ae2b22
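The fused kernel for this row evaluates -x + softplus(2x), which is log(cosh(x)) up to an additive constant: log(cosh(x)) = log((e^x + e^-x) / 2) = -x + log(1 + e^(2x)) - log(2). A quick CPU-only check of that identity (the linspace range and tolerances are arbitrary choices, not from the dataset):

import torch
import torch.nn.functional as F

x = torch.linspace(-5.0, 5.0, steps=101)
lhs = -x + F.softplus(2.0 * x)                                 # what the module/kernel computes
rhs = torch.log(torch.cosh(x)) + torch.log(torch.tensor(2.0))  # log(cosh(x)) + log(2)
torch.testing.assert_close(lhs, rhs, rtol=1e-5, atol=1e-5)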
entry_point: AutoEncoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wy/cwyx3wa4jndgnwzcjpr33hhlviahccyeckxfax46ztwjbjc22gd7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) 
tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ra/cra224sir7auxmy3uahcdxu3s4qtjmerylv5f5l4d2w56knl3yv5.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ug/cug7jf5vyab745xxewtsuswvbenvbnnefjmrlskwtfa5kz4uwn57.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nx/cnxezssbpamkwra3kbfqrgnvkxjz7jxz6ghuajxgkavteup7fvck.py # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_4 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bb/cbbicufcwpsnk322eudyeymdafmoz4owaoryaz27xka4js5ji7rq.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_5 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ml/cmlsgnhlc5xc4fnf44r4477luwob4pv66ve4iyuq5rqd3kdwvb5k.py # Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_6 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex 
< xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ga/cga35qgbrnhw7eurlzscgcpykqayqvracco3nxsf4m54ddatk5m4.py # Topologically Sorted Source Nodes: [encoder], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # encoder => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_8/inductor_cache/pf/cpfkk4tbw4jsooqmfkkbsgybjdh2v7owu4nh5gt6vl5ujnqsaza2.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # x_8 => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (75,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.05333333333333334), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_8 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_8(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 75 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.05333333333333334 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ta/ctaf32zegnrmc7zywdj7b3rlnnmypgwmggwjhdzaomxtlar2aiao.py # Topologically Sorted Source Nodes: [conv2d_4, x_7, x_8], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_7 => relu_4 # x_8 => _unsafe_index # Graph fragment: # %convolution_4 
: [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_relu_9 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 90000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 75) % 75 x0 = xindex % 75 x5 = (xindex // 5625) x2 = (xindex // 5625) % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x6), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gs/cgsmdrrasklzcwgqwzmxg2nwc32cbewmbufsrdaip57j677lvxpx.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # x_10 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (150,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, 
requires_grad: False}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {}) # %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_10 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_10(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 150 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/s6/cs6v7wmpqjghmdqt7ffkryfrmgm3c263spedaagnwe3mrvslejvi.py # Topologically Sorted Source Nodes: [conv2d_5, x_9, x_10], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_10 => _unsafe_index_1 # x_9 => relu_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {}) 
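# Editor's sketch (illustrative, not Inductor output): each of the
# `_unsafe_index_convolution_relu` kernels in this file fuses three ops into a
# single pass over the upsampled tensor: the convolution's bias add, the ReLU,
# and the nearest-neighbor gather through the int64 tables produced by the
# `arange_mul` kernels above. A hypothetical eager-mode reference for the same
# computation (the function name and signature are ours, not the library's):
def _reference_relu_nearest_gather(conv_out, bias, idx):
    # conv_out: (N, C, H, W) convolution result, bias not yet added
    # idx:      1-D int64 lookup table with idx[i] = floor(i * H / out_size);
    #           the same table serves rows and columns since H == W here
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    # gather rows, then columns -- mirrors the kernel's indirect load
    # `in_ptr1[tmp8 + W * tmp4 + H * W * x5]`
    return y[:, :, idx][:, :, :, idx]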
triton_poi_fused__unsafe_index_convolution_relu_11 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 360000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 150) % 150 x0 = xindex % 150 x5 = (xindex // 22500) x2 = (xindex // 22500) % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 75, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (75*tmp4) + (5625*x5)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x6), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nm/cnmeqmbgxlc5ofk6ztbccz5anlzctxk4bj6jaz7pnfnkt4gpa2pc.py # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # x_12 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_4, mul_8, mul_9 # Graph fragment: # %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (300,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_4, 1), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {}) # %add_9 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {}) # %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_12 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 300 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6i/c6iewbflwj6cfu22sfnci2nesximabsxh4alwlp7wiuqebivgp7s.py # Topologically Sorted Source Nodes: [conv2d_6, x_11, x_12], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # x_11 => relu_6 # x_12 => _unsafe_index_2 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_relu_13 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2880000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 300) % 300 x0 = xindex % 300 x5 = (xindex // 90000) x2 = (xindex // 90000) % 8 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 150, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (150*tmp4) + (22500*x5)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x6), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7a/c7ansopitzg3spk5xqvqycbwbkncqojlhjjkwbxbfegassvy2jal.py # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # x_14 => add_12, add_13, convert_element_type_12, convert_element_type_13, iota_6, mul_12, mul_13 # Graph fragment: # %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (600,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_6, 1), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 0), kwargs = {}) # %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_12, torch.float32), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.5), kwargs = {}) # %convert_element_type_13 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_13, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_14 = 
async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uh/cuhltgfyk3mqt4gvq5skls3jfr2r6tja4tvefijllksfmj464gmg.py # Topologically Sorted Source Nodes: [conv2d_7, x_13, x_14], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # x_13 => relu_7 # x_14 => _unsafe_index_3 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %unsqueeze_3, %convert_element_type_13]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_relu_15 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 23040000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 600) % 600 x0 = xindex % 600 x5 = (xindex // 360000) x2 = (xindex // 360000) % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 300, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (300*tmp4) + (90000*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x6), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/u4/cu4tbmqqzyl5z5c3hh57bcdo4cb2y7mt2hhpwusjfejsxnz2zeq6.py # Topologically Sorted Source Nodes: [decoder], Original ATen: [aten.convolution] # Source node to ATen node mapping: # decoder => convolution_8 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_16 = async_compile.triton('triton_poi_fused_convolution_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4320000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 360000) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kw/ckwxv3tsnhrvojkiy2y5bsv25oo5e7x7j5jtc3wvhsorna5b5szz.py # Topologically Sorted Source Nodes: [conv2d_7, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # x_13 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_17 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 5760000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 90000) % 16 x0 = xindex % 90000 x4 = (xindex // 90000) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (90112*x4)), tmp6, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/55/c55runksho5o5anl3oygiasl45zsqdpbi4bhr5hq65chu7yr5ypn.py # Topologically Sorted Source Nodes: [conv2d_6, x_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # x_11 => relu_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_18 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 720000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 22500) % 8 x0 = xindex % 22500 x4 = (xindex // 22500) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (22528*x4)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/q7/cq72vwuw4raackluk33hw7mf4ycnhna7xdlv65sr22faufn2c4f7.py # Topologically Sorted Source Nodes: [conv2d_5, x_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_9 => relu_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_19 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 90000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 5625) % 4 x0 = xindex % 5625 x4 = (xindex // 5625) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (5632*x4)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4u/c4u3kn2yhldzbfa6qg2rposgcct6xzamjkvklz52tt2rodu7wpzx.py # Topologically Sorted Source Nodes: [conv2d_4, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_7 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_20 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_15, (8, ), (1, )) assert_size_stride(primals_16, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_17, (16, ), (1, )) assert_size_stride(primals_18, (3, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_19, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, 
aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 32, 32), (8192, 1024, 32, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 32768, grid=grid(32768), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 8192, grid=grid(8192), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 16, 16), (1024, 256, 16, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 4096, grid=grid(4096), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.int8) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 1024, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 8, 8), (256, 64, 8, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf13, primals_9, 1024, grid=grid(1024), stream=stream0) del primals_9 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int8) # Topologically Sorted Source Nodes: [encoder], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf13, buf14, buf15, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1)) buf17 = empty_strided_cuda((75, ), (1, ), torch.int64) # Topologically Sorted Source 
Nodes: [x_8], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_8.run(buf17, 75, grid=grid(75), stream=stream0) buf18 = empty_strided_cuda((4, 4, 75, 75), (22500, 5625, 75, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_4, x_7, x_8], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] triton_poi_fused__unsafe_index_convolution_relu_9.run(buf17, buf16, primals_11, buf18, 90000, grid=grid(90000), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 75, 75), (22500, 5625, 75, 1)) buf20 = empty_strided_cuda((150, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_10.run(buf20, 150, grid=grid(150), stream=stream0) buf21 = empty_strided_cuda((4, 4, 150, 150), (90000, 22500, 150, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_5, x_9, x_10], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] triton_poi_fused__unsafe_index_convolution_relu_11.run(buf20, buf19, primals_13, buf21, 360000, grid=grid(360000), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 150, 150), (180000, 22500, 150, 1)) buf23 = empty_strided_cuda((300, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_12.run(buf23, 300, grid=grid(300), stream=stream0) buf24 = empty_strided_cuda((4, 8, 300, 300), (720000, 90000, 300, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_6, x_11, x_12], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] triton_poi_fused__unsafe_index_convolution_relu_13.run(buf23, buf22, primals_15, buf24, 2880000, grid=grid(2880000), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 16, 300, 300), (1440000, 90000, 300, 1)) buf26 = empty_strided_cuda((600, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_14.run(buf26, 600, grid=grid(600), stream=stream0) buf27 = empty_strided_cuda((4, 16, 600, 600), (5760000, 360000, 600, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_7, x_13, x_14], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index] triton_poi_fused__unsafe_index_convolution_relu_15.run(buf26, buf25, primals_17, buf27, 23040000, grid=grid(23040000), stream=stream0) # Topologically Sorted Source Nodes: [decoder], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 3, 600, 600), (1080000, 
360000, 600, 1)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [decoder], Original ATen: [aten.convolution] triton_poi_fused_convolution_16.run(buf29, primals_19, 4320000, grid=grid(4320000), stream=stream0) del primals_19 buf30 = empty_strided_cuda((4, 16, 300, 300), (1441792, 90112, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_7, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_17.run(buf25, primals_17, buf30, 5760000, grid=grid(5760000), stream=stream0) del buf25 del primals_17 buf31 = empty_strided_cuda((4, 8, 150, 150), (180224, 22528, 150, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_6, x_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_18.run(buf22, primals_15, buf31, 720000, grid=grid(720000), stream=stream0) del buf22 del primals_15 buf32 = empty_strided_cuda((4, 4, 75, 75), (22528, 5632, 75, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_5, x_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_19.run(buf19, primals_13, buf32, 90000, grid=grid(90000), stream=stream0) del buf19 del primals_13 buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_4, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_20.run(buf16, primals_11, buf33, 256, grid=grid(256), stream=stream0) del buf16 del primals_11 return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24, buf26, buf27, buf30, buf31, buf32, buf33, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((16, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((3, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
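# Editor's sketch (illustrative, not part of the generated module): the four
# `triton_poi_fused__to_copy_add_arange_mul_*` kernels above precompute the
# source-index tables for nearest-neighbor upsampling. Output position i reads
# from source position floor(i * in_size / out_size): the constant
# 0.05333333333333334 is 4/75 for the Upsample(75) stage, and 0.5 drives every
# scale_factor=2 stage. The same math in plain Python (helper name is ours):
def nearest_upsample_indices(in_size, out_size):
    # matches the int64 tables stored in buf17 (75 entries), buf20 (150),
    # buf23 (300) and buf26 (600)
    return [int(i * in_size / out_size) for i in range(out_size)]

assert nearest_upsample_indices(4, 75)[:6] == [0, 0, 0, 0, 0, 0]
assert nearest_upsample_indices(75, 150)[:4] == [0, 0, 1, 1]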
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class AutoEncoder(nn.Module):

    def __init__(self):
        super(AutoEncoder, self).__init__()
        # encoder: 3 -> 16 -> 8 -> 4 -> 4 channels, each conv followed by a
        # 2x2 max pool (ceil_mode=True), so a 64x64 input shrinks to 4x4
        self.encoder1 = nn.Conv2d(3, 16, 3, padding=1)
        self.encoder2 = nn.Conv2d(16, 8, 3, padding=1)
        self.encoder3 = nn.Conv2d(8, 4, 3, padding=1)
        self.encoder4 = nn.Conv2d(4, 4, 3, padding=1)
        self.pool = nn.MaxPool2d(2, ceil_mode=True)
        # decoder: one fixed-size jump to 75x75, then three 2x nearest
        # upsamples, giving 75 -> 150 -> 300 -> 600
        self.up1 = nn.Upsample(75, mode='nearest')
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.decoder1 = nn.Conv2d(4, 4, 3, padding=1)
        self.decoder2 = nn.Conv2d(4, 4, 3, padding=1)
        self.decoder3 = nn.Conv2d(4, 8, 3, padding=1)
        self.decoder4 = nn.Conv2d(8, 16, 3, padding=1)
        self.decoder5 = nn.Conv2d(16, 3, 3, padding=1)

    def forward(self, x):
        x = F.relu(self.encoder1(x))
        x = self.pool(x)
        x = F.relu(self.encoder2(x))
        x = self.pool(x)
        x = F.relu(self.encoder3(x))
        x = self.pool(x)
        x = F.relu(self.encoder4(x))
        encoder = self.pool(x)
        x = F.relu(self.decoder1(encoder))
        x = self.up1(x)
        x = F.relu(self.decoder2(x))
        x = self.up(x)
        x = F.relu(self.decoder3(x))
        x = self.up(x)
        x = F.relu(self.decoder4(x))
        x = self.up(x)
        decoder = self.decoder5(x)
        return decoder


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
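# Editor's sanity check (illustrative, not part of the original source):
# running the eager module on the sample input reproduces the shapes the
# compiled kernels hard-code -- the encoder pools 64 -> 32 -> 16 -> 8 -> 4 and
# the decoder upsamples 4 -> 75 -> 150 -> 300 -> 600, so the reconstruction is
# larger than the input. Assumes CPU eager execution is acceptable:
if __name__ == "__main__":
    out = AutoEncoder()(get_inputs()[0])
    assert out.shape == (4, 3, 600, 600)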
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = 
tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_8(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 75 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.05333333333333334 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 90000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 75 % 75 x0 = xindex % 75 x5 = xindex // 5625 x2 = xindex // 5625 % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 150 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 360000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 150 % 150 x0 = xindex % 150 x5 = xindex // 22500 x2 = xindex // 22500 % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 75, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 75 * tmp4 + 5625 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 300 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2880000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 300 % 300 x0 = xindex % 300 x5 = xindex // 90000 x2 = xindex // 90000 % 8 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 150, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, 
tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 150 * tmp4 + 22500 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 600 % 600 x0 = xindex % 600 x5 = xindex // 360000 x2 = xindex // 360000 % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 300, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 300 * tmp4 + 90000 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, None) @triton.jit def triton_poi_fused_convolution_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4320000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 360000 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 5760000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 90000 % 16 x0 = xindex % 90000 x4 = xindex // 90000 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 90112 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 720000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 22500 % 8 x0 = xindex % 22500 x4 = xindex // 22500 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 22528 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 90000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 5625 % 4 x0 = xindex % 5625 x4 = xindex // 5625 tmp0 = tl.load(in_ptr0 + x3, xmask) 
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 5632 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_15, (8,), (1,)) assert_size_stride(primals_16, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_17, (16,), (1,)) assert_size_stride(primals_18, (3, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_19, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2, buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 32, 32), (8192, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(32768)](buf5, primals_5, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch .float32) buf7 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_3[grid(8192)](buf5, buf6, buf7, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf8, (4, 4, 16, 16), (1024, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(4096)](buf9, primals_7, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32 ) buf11 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(1024)](buf9, buf10, buf11, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 8, 8), (256, 64, 8, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_6[grid(1024)](buf13, primals_9, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(256)](buf13, buf14, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1)) buf17 = empty_strided_cuda((75,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_8[grid(75)](buf17, 75, XBLOCK=128, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((4, 4, 75, 75), (22500, 5625, 75, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_9[grid(90000)](buf17, buf16, primals_11, buf18, 90000, XBLOCK=512, num_warps=8, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 75, 75), (22500, 5625, 75, 1)) buf20 = empty_strided_cuda((150,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_10[grid(150)](buf20, 150, XBLOCK=128, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((4, 4, 150, 150), (90000, 22500, 150, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_11[grid(360000)](buf20, buf19, primals_13, buf21, 360000, XBLOCK=512, num_warps=8, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 150, 150), (180000, 22500, 150, 1)) buf23 = empty_strided_cuda((300,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_12[grid(300)](buf23, 300, XBLOCK=256, num_warps=4, num_stages=1) buf24 = empty_strided_cuda((4, 8, 300, 300), (720000, 90000, 300, 1 ), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_13[grid(2880000)](buf23 , buf22, primals_15, buf24, 2880000, XBLOCK=1024, num_warps=4, num_stages=1) buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 16, 300, 300), (1440000, 90000, 300, 1)) buf26 = empty_strided_cuda((600,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_14[grid(600)](buf26, 600, XBLOCK=256, num_warps=4, num_stages=1) buf27 = empty_strided_cuda((4, 16, 600, 600), (5760000, 360000, 600, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_15[grid(23040000)]( buf26, buf25, primals_17, buf27, 
23040000, XBLOCK=512, num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 3, 600, 600), (1080000, 360000, 600, 1)) buf29 = buf28 del buf28 triton_poi_fused_convolution_16[grid(4320000)](buf29, primals_19, 4320000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf30 = empty_strided_cuda((4, 16, 300, 300), (1441792, 90112, 300, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_17[grid(5760000)]( buf25, primals_17, buf30, 5760000, XBLOCK=1024, num_warps=4, num_stages=1) del buf25 del primals_17 buf31 = empty_strided_cuda((4, 8, 150, 150), (180224, 22528, 150, 1 ), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_18[grid(720000)]( buf22, primals_15, buf31, 720000, XBLOCK=1024, num_warps=4, num_stages=1) del buf22 del primals_15 buf32 = empty_strided_cuda((4, 4, 75, 75), (22528, 5632, 75, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(90000)]( buf19, primals_13, buf32, 90000, XBLOCK=1024, num_warps=4, num_stages=1) del buf19 del primals_13 buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_20[grid(256)]( buf16, primals_11, buf33, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf16 del primals_11 return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24, buf26, buf27, buf30, buf31, buf32, buf33) class AutoEncoderNew(nn.Module): def __init__(self): super(AutoEncoderNew, self).__init__() self.encoder1 = nn.Conv2d(3, 16, 3, padding=1) self.encoder2 = nn.Conv2d(16, 8, 3, padding=1) self.encoder3 = nn.Conv2d(8, 4, 3, padding=1) self.encoder4 = nn.Conv2d(4, 4, 3, padding=1) self.pool = nn.MaxPool2d(2, ceil_mode=True) self.up1 = nn.Upsample(75, mode='nearest') self.up = nn.Upsample(scale_factor=2, mode='nearest') self.decoder1 = nn.Conv2d(4, 4, 3, padding=1) self.decoder2 = nn.Conv2d(4, 4, 3, padding=1) self.decoder3 = nn.Conv2d(4, 8, 3, padding=1) self.decoder4 = nn.Conv2d(8, 16, 3, padding=1) self.decoder5 = nn.Conv2d(16, 3, 3, padding=1) def forward(self, input_0): primals_1 = self.encoder1.weight primals_2 = self.encoder1.bias primals_4 = self.encoder2.weight primals_5 = self.encoder2.bias primals_6 = self.encoder3.weight primals_7 = self.encoder3.bias primals_8 = self.encoder4.weight primals_9 = self.encoder4.bias primals_10 = self.decoder1.weight primals_11 = self.decoder1.bias primals_12 = self.decoder2.weight primals_13 = self.decoder2.bias primals_14 = self.decoder3.weight primals_15 = self.decoder3.bias primals_16 = self.decoder4.weight primals_17 = self.decoder4.bias primals_18 = self.decoder5.weight primals_19 = self.decoder5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0]
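The _to_copy_add_arange_mul kernels above only precompute nearest-neighbor source indices for the upsampling stages: the constant 0.05333333333333334 is 4/75 for nn.Upsample(75) applied to a 4x4 map, and the scale_factor=2 stages use 0.5, so output position i reads source position int(i * in_size / out_size). A minimal illustrative sketch of that index map, checked against F.interpolate (the helper name is not from the generated code):

import torch
import torch.nn.functional as F

def nearest_indices(in_size, out_size):
    # floor(i * in_size / out_size), the same map the fused arange/mul kernels build
    i = torch.arange(out_size, dtype=torch.float32)
    return (i * (in_size / out_size)).to(torch.int64)

x = torch.randn(1, 1, 4, 4)
idx = nearest_indices(4, 75)      # matches the 0.05333... = 4/75 constant above
up = x[:, :, idx][:, :, :, idx]   # gather rows, then columns
assert torch.equal(up, F.interpolate(x, size=75, mode='nearest'))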
gjustin40/Pytorch-Cookbook
AutoEncoder
false
10,195
[ "MIT" ]
0
069514d05b00d07521e1a1a028d0746b65099586
https://github.com/gjustin40/Pytorch-Cookbook/tree/069514d05b00d07521e1a1a028d0746b65099586
DQN
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_1 => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (2, 16), (16, 1))
    assert_size_stride(primals_5, (2, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0  # reuse
        buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 1024, grid=grid(1024), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 2), (1, 16), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((2, 16), (16, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch import nn


class DQN(nn.Module):
    """DQN network, two fully connected layers."""

    def __init__(self):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(4, 16)
        self.fc1.weight.data.normal_(0, 0.1)
        self.fc2 = nn.Linear(16, 2)
        self.fc2.weight.data.normal_(0, 0.1)

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
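Note that get_inputs feeds a (4, 4, 4, 4) tensor: nn.Linear applies only to the trailing dimension, so the forward pass is equivalent to one (64, 4) matmul, which is exactly the flattening the compiled version below performs with reinterpret_tensor. A quick illustrative check (not part of the record):

import torch
from torch import nn

fc1 = nn.Linear(4, 16)
x = torch.rand(4, 4, 4, 4)
# Linear acts on the last dim, so a flat (64, 4) matmul gives the same result
assert torch.allclose(fc1(x), fc1(x.reshape(64, 4)).reshape(4, 4, 4, 16))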
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (2, 16), (16, 1))
    assert_size_stride(primals_5, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
            primals_2, buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
            (16, 1), 0), reinterpret_tensor(primals_4, (16, 2), (1, 16), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3)


class DQNNew(nn.Module):
    """DQN network, two fully connected layers."""

    def __init__(self):
        super(DQNNew, self).__init__()
        self.fc1 = nn.Linear(4, 16)
        self.fc1.weight.data.normal_(0, 0.1)
        self.fc2 = nn.Linear(16, 2)
        self.fc2.weight.data.normal_(0, 0.1)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
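The single fused kernel overwrites the mm output in place with relu(x @ W1^T + b1) and, in the same pass, records the boolean mask that aten.threshold_backward later uses to zero gradients. In eager terms, a sketch of the two stores (the stand-in tensors below are illustrative, not the real buffers):

import torch

mm_out = torch.randn(64, 16)       # stands in for buf0 = x @ fc1.weight.T
bias = torch.randn(16)             # stands in for primals_2
buf1 = torch.relu(mm_out + bias)   # value written back over the mm output
buf3 = buf1 <= 0                   # mask saved for threshold_backward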
ivanwhaf/RL
DQN
false
10,196
[ "MIT" ]
0
1610b3684269b1d60543c60460e9ee65309594ee
https://github.com/ivanwhaf/RL/tree/1610b3684269b1d60543c60460e9ee65309594ee
GeLU
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/3v/c3vxe4labx22axljvqnpfvh4p4urhfykzepqmzyxdeaf6uj4elja.py
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   mul => mul
#   mul_1 => mul_1
#   mul_2 => mul_2
#   mul_3 => mul_3
#   mul_4 => mul_4
#   mul_5 => mul_5
#   tanh => tanh
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.044715), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %arg0_1), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %arg0_1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_3), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608), kwargs = {})
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.044715
    tmp4 = tmp0 * tmp3
    tmp5 = tmp4 * tmp0
    tmp6 = tmp5 * tmp0
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class GeLU(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 0.5 * x * (1 + F.tanh(0.7978845608 * (x + 0.044715 * x * x * x)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
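The constant 0.7978845608 is sqrt(2/pi), so the forward above is the standard tanh approximation of GELU:

\mathrm{GELU}(x) \approx \frac{x}{2}\left(1 + \tanh\!\left(\sqrt{2/\pi}\,\bigl(x + 0.044715\,x^{3}\bigr)\right)\right), \qquad \sqrt{2/\pi} \approx 0.7978845608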
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.044715
    tmp4 = tmp0 * tmp3
    tmp5 = tmp4 * tmp0
    tmp6 = tmp5 * tmp0
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class GeLUNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
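The fused kernel evaluates that expression element by element (tmp2 = x/2, tmp7 = x + 0.044715*x**3, then tanh). Recent PyTorch exposes the same approximation as F.gelu(..., approximate='tanh'), so on a CUDA machine (call() pins cuda:0) a sanity check could look like the sketch below; this is an assumed test, not part of the record:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
y = GeLUNew()(x)
ref = F.gelu(x, approximate='tanh')  # built-in tanh approximation of GELU
assert torch.allclose(y, ref, atol=1e-6)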
irustandi/sentiment-discovery
GeLU
false
10,197
[ "BSD-3-Clause" ]
0
a2e074f33bbac94ec9dba111a91da026633dad67
https://github.com/irustandi/sentiment-discovery/tree/a2e074f33bbac94ec9dba111a91da026633dad67
Generator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/w2/cw26etiuqgfsnlcvfovjrjfkwerbr3hb33ggi6l6pg47hpyjzaos.py # Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3d/c3dyw6akmjhpp35jaqq3z7ozrdt7ywexn7u7aab2ekphhcccz3vm.py # Topologically Sorted Source Nodes: [concat1], Original ATen: [aten.cat] # Source node to ATen node mapping: # concat1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 98304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 6 x0 = xindex % 4096 x2 = (xindex // 24576) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 6, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + ((-3) + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/l7/cl7utfe7bhcs2m55c3vdbkrwcvmnsjji6vqhnykjtsppv3gprqlv.py # Topologically Sorted Source Nodes: [concat2], Original ATen: [aten.cat] # Source node to ATen node mapping: # concat2 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = 
call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_2], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 98304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 6 x0 = xindex % 4096 x2 = (xindex // 24576) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 6, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp12, other=0.0) tmp16 = tl.load(in_ptr3 + ((-3) + x1), tmp12, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + (x3), tmp21, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uw/cuwzisxb4drputjagww2hliycpjgkk573x3xmc4tbpcgmr5atzho.py # Topologically Sorted Source Nodes: [concat3], Original ATen: [aten.cat] # Source node to ATen node mapping: # concat3 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1, %relu_2, %relu_3], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 196608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 12 x0 = xindex % 4096 x2 = (xindex // 49152) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 6, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + ((-3) + x1), tmp9, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.full([1], 0, tl.int32) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tmp0 >= tmp7 tmp18 = tl.full([1], 9, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr3 + (x0 + (4096*((-6) + x1)) + (12288*x2)), tmp20, other=0.0) tmp22 = tl.load(in_ptr4 + ((-6) + x1), tmp20, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = triton_helpers.maximum(tmp13, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp20, tmp24, tmp25) tmp27 = tmp0 >= tmp18 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tl.load(in_ptr5 + (x0 + (4096*((-9) + x1)) + (12288*x2)), tmp27, other=0.0) tmp31 = tl.load(in_ptr6 + ((-9) + x1), tmp27, eviction_policy='evict_last', other=0.0) tmp32 = tmp30 + tmp31 tmp33 = triton_helpers.maximum(tmp13, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp27, tmp33, tmp34) tmp36 = tl.where(tmp20, tmp26, tmp35) tmp37 = tl.where(tmp9, tmp16, tmp36) tmp38 = tl.where(tmp4, tmp5, tmp37) tl.store(out_ptr0 + (x3), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tv/ctvjgj5twpgsgagm4xdsbg4sdecdb6ignjpton25dplnkoizkzvq.py # Topologically Sorted Source Nodes: [clean_image_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # clean_image_1 => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = 
call_function[target=torch.ops.aten.cat.default](args = ([%slice_2, %relu_5], 1), kwargs = {}) triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 3 x0 = xindex % 4096 x2 = (xindex // 12288) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (12288*x2)), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-1) + x1)) + (8192*x2)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + ((-1) + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr0 + (4096 + x0 + (4096*((-1) + x1)) + (12288*x2)), tmp6, other=0.0) tmp15 = tmp13 * tmp14 tmp16 = tmp15 - tmp13 tmp17 = 1.0 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp6, tmp19, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x3), tmp22, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ol/colle6vvmqfrzye7tnywfjubc6wqawwkd5lk6x4zt35sefqjsnbo.py # Topologically Sorted Source Nodes: [conv2d_4, x5, mul, sub, add, clean_image], Original ATen: [aten.convolution, aten.relu, aten.mul, aten.sub, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # add => add # clean_image => relu_5 # conv2d_4 => convolution_4 # mul => mul # sub => sub # x5 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=3] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_4, %slice_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %relu_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5 = async_compile.triton('triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 2 x2 = (xindex // 8192) x4 = xindex % 8192 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (4096 + x4 + (12288*x2)), None) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = tmp4 * tmp5 tmp7 = tmp6 - tmp4 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp3, tmp9) tmp11 = 0.0 tmp12 = tmp10 <= tmp11 tmp13 = tmp4 <= tmp11 tl.store(out_ptr0 + (x3), tmp12, None) tl.store(out_ptr1 + (x3), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/b6/cb6y5xus3enlp35rnceehsj36ohxzegpsjibpv5quvxf5hjtsolh.py # Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => 
convolution_3 # x4 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_8, %primals_9, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (3, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (3, ), (1, )) assert_size_stride(primals_6, (3, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_7, (3, ), (1, )) assert_size_stride(primals_8, (3, 6, 7, 7), (294, 49, 7, 1)) assert_size_stride(primals_9, (3, ), (1, )) assert_size_stride(primals_10, (2, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_11, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] 
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 49152, grid=grid(49152), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [concat1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf2, primals_5, buf3, 98304, grid=grid(98304), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf5 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [concat2], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf2, primals_5, buf4, primals_7, buf5, 98304, grid=grid(98304), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [concat3], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(buf1, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 196608, grid=grid(196608), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 2, 64, 64), (8192, 4096, 64, 1)) buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [clean_image_1], Original ATen: [aten.cat] triton_poi_fused_cat_4.run(primals_3, buf8, primals_11, buf9, 49152, grid=grid(49152), stream=stream0) buf10 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1), torch.bool) buf11 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_4, x5, mul, sub, add, clean_image], Original ATen: [aten.convolution, aten.relu, aten.mul, aten.sub, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5.run(buf8, primals_11, primals_3, buf10, buf11, 32768, grid=grid(32768), stream=stream0) del buf8 del primals_11 buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] 
triton_poi_fused_convolution_relu_threshold_backward_6.run(buf6, primals_9, buf12, 49152, grid=grid(49152), stream=stream0) del buf6 del primals_9 buf13 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_6.run(buf4, primals_7, buf13, 49152, grid=grid(49152), stream=stream0) del buf4 del primals_7 buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_6.run(buf2, primals_5, buf14, 49152, grid=grid(49152), stream=stream0) del buf2 del primals_5 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, buf14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((3, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((3, 6, 7, 7), (294, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((2, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
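Worth noting about the kernels above: triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5 and triton_poi_fused_convolution_relu_threshold_backward_6 produce no forward outputs; they recompute the ReLU activations and store boolean (x <= 0) masks (buf10 through buf14) that aten.threshold_backward later uses to zero gradients. A plain-PyTorch sketch of what one such mask encodes (illustrative, not the generated code):

import torch

conv_out = torch.randn(4, 3, 64, 64)
act = torch.relu(conv_out)
mask = act <= 0                                  # what kernel 6 stores
grad_out = torch.randn_like(act)
# threshold_backward zeroes the gradient wherever the saved mask is True.
grad_in = torch.where(mask, torch.zeros_like(grad_out), grad_out)
torch.testing.assert_close(grad_in, grad_out * (conv_out > 0))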
import torch from torch import nn import torch.utils.data class Generator(nn.Module): def __init__(self): super(Generator, self).__init__() self.relu = nn.ReLU(inplace=True) self.e_conv1 = nn.Conv2d(3, 3, 1, 1, 0, bias=True) self.e_conv2 = nn.Conv2d(3, 3, 3, 1, 1, bias=True) self.e_conv3 = nn.Conv2d(6, 3, 5, 1, 2, bias=True) self.e_conv4 = nn.Conv2d(6, 3, 7, 1, 3, bias=True) self.e_conv5 = nn.Conv2d(12, 2, 3, 1, 1, bias=True) def forward(self, x): x1 = self.relu(self.e_conv1(x)) x2 = self.relu(self.e_conv2(x1)) concat1 = torch.cat((x1, x2), 1) x3 = self.relu(self.e_conv3(concat1)) concat2 = torch.cat((x2, x3), 1) x4 = self.relu(self.e_conv4(concat2)) concat3 = torch.cat((x1, x2, x3, x4), 1) x5 = self.relu(self.e_conv5(concat3)) x_sub = torch.narrow(x, 1, 1, 2) clean_image = self.relu(x5 * x_sub - x5 + 1) x_sub = torch.narrow(x, 1, 0, 1) clean_image = torch.cat((x_sub, clean_image), 1) """ x5 = self.relu(self.e_conv5(concat3)) clean_image = self.relu((x5 * x_sub) - x5 + 1) clean_image = torch.narrow(clean_image,) """ return clean_image """ def __init__(self, scale_factor): upsample_block_num = int(math.log(scale_factor, 2)) super(Generator, self).__init__() self.block1 = nn.Sequential( nn.Conv2d(3, 64, kernel_size=9, padding=4), nn.PReLU() ) self.block2 = ResidualBlock(64) self.block3 = ResidualBlock(64) self.block4 = ResidualBlock(64) self.block5 = ResidualBlock(64) self.block6 = ResidualBlock(64) self.block7 = nn.Sequential( nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64) ) block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)] block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4)) self.block8 = nn.Sequential(*block8) def forward(self, x): block1 = self.block1(x) block2 = self.block2(block1) block3 = self.block3(block2) block4 = self.block4(block3) block5 = self.block5(block4) block6 = self.block6(block5) block7 = self.block7(block6) block8 = self.block8(block1 + block7) return (torch.tanh(block8) + 1) / 2 """ def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
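The forward pass above resembles the AOD-Net formulation: the 2-channel map x5 acts as a per-pixel modulation K, channels 1-2 of the input are restored as relu(K * I - K + 1), and channel 0 passes through unchanged. A minimal standalone sketch of that formula, assuming only the shapes from get_inputs() (the tensor names k and restored are illustrative):

import torch
import torch.nn.functional as F

# k stands in for x5 = relu(e_conv5(concat3)); x for the network input.
x = torch.rand(4, 3, 64, 64)
k = torch.rand(4, 2, 64, 64)
x_sub = torch.narrow(x, 1, 1, 2)                   # channels 1..2
restored = F.relu(k * x_sub - k + 1)               # per-pixel modulation
clean = torch.cat((torch.narrow(x, 1, 0, 1), restored), 1)
assert clean.shape == (4, 3, 64, 64)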
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 6 x0 = xindex % 4096 x2 = xindex // 24576 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-3 + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 6 x0 = xindex % 4096 x2 = xindex // 24576 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp12, other=0.0) tmp16 = tl.load(in_ptr3 + (-3 + x1), tmp12, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 12 x0 = xindex % 4096 x2 = xindex // 49152 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = 
tl.full([1], 6, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-3 + x1), tmp9, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.full([1], 0, tl.int32) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tmp0 >= tmp7 tmp18 = tl.full([1], 9, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr3 + (x0 + 4096 * (-6 + x1) + 12288 * x2), tmp20, other=0.0) tmp22 = tl.load(in_ptr4 + (-6 + x1), tmp20, eviction_policy= 'evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = triton_helpers.maximum(tmp13, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp20, tmp24, tmp25) tmp27 = tmp0 >= tmp18 tl.full([1], 12, tl.int64) tmp30 = tl.load(in_ptr5 + (x0 + 4096 * (-9 + x1) + 12288 * x2), tmp27, other=0.0) tmp31 = tl.load(in_ptr6 + (-9 + x1), tmp27, eviction_policy= 'evict_last', other=0.0) tmp32 = tmp30 + tmp31 tmp33 = triton_helpers.maximum(tmp13, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp27, tmp33, tmp34) tmp36 = tl.where(tmp20, tmp26, tmp35) tmp37 = tl.where(tmp9, tmp16, tmp36) tmp38 = tl.where(tmp4, tmp5, tmp37) tl.store(out_ptr0 + x3, tmp38, None) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 3 x0 = xindex % 4096 x2 = xindex // 12288 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 12288 * x2), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 3, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-1 + x1) + 8192 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-1 + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr0 + (4096 + x0 + 4096 * (-1 + x1) + 12288 * x2), tmp6, other=0.0) tmp15 = tmp13 * tmp14 tmp16 = tmp15 - tmp13 tmp17 = 1.0 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp6, tmp19, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x3, tmp22, None) @triton.jit def triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 2 x2 = xindex // 8192 x4 = xindex % 8192 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (4096 + x4 + 12288 * x2), None) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = tmp4 * tmp5 tmp7 = tmp6 - tmp4 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp3, tmp9) tmp11 = 0.0 tmp12 = tmp10 <= tmp11 tmp13 = tmp4 <= tmp11 tl.store(out_ptr0 + x3, tmp12, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (3,), (1,)) assert_size_stride(primals_6, (3, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_7, (3,), (1,)) assert_size_stride(primals_8, (3, 6, 7, 7), (294, 49, 7, 1)) assert_size_stride(primals_9, (3,), (1,)) assert_size_stride(primals_10, (2, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_11, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(49152)](buf1, primals_2, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(98304)](buf1, buf2, primals_5, buf3, 98304, XBLOCK=1024, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf5 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(98304)](buf2, primals_5, buf4, primals_7, buf5, 98304, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(196608)](buf1, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 196608, XBLOCK=512, num_warps =8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 2, 64, 64), (8192, 4096, 64, 1)) buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_4[grid(49152)](primals_3, buf8, primals_11, buf9, 49152, XBLOCK=512, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1), torch.bool) buf11 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1), torch.bool) triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_5[grid (32768)](buf8, primals_11, primals_3, buf10, buf11, 32768, XBLOCK=256, num_warps=4, 
num_stages=1) del buf8 del primals_11 buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(49152)]( buf6, primals_9, buf12, 49152, XBLOCK=512, num_warps=4, num_stages=1) del buf6 del primals_9 buf13 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(49152)]( buf4, primals_7, buf13, 49152, XBLOCK=512, num_warps=4, num_stages=1) del buf4 del primals_7 buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(49152)]( buf2, primals_5, buf14, 49152, XBLOCK=512, num_warps=4, num_stages=1) del buf2 del primals_5 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, buf14) class GeneratorNew(nn.Module): def __init__(self): super(GeneratorNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.e_conv1 = nn.Conv2d(3, 3, 1, 1, 0, bias=True) self.e_conv2 = nn.Conv2d(3, 3, 3, 1, 1, bias=True) self.e_conv3 = nn.Conv2d(6, 3, 5, 1, 2, bias=True) self.e_conv4 = nn.Conv2d(6, 3, 7, 1, 3, bias=True) self.e_conv5 = nn.Conv2d(12, 2, 3, 1, 1, bias=True) """ def __init__(self, scale_factor): upsample_block_num = int(math.log(scale_factor, 2)) super(Generator, self).__init__() self.block1 = nn.Sequential( nn.Conv2d(3, 64, kernel_size=9, padding=4), nn.PReLU() ) self.block2 = ResidualBlock(64) self.block3 = ResidualBlock(64) self.block4 = ResidualBlock(64) self.block5 = ResidualBlock(64) self.block6 = ResidualBlock(64) self.block7 = nn.Sequential( nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64) ) block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)] block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4)) self.block8 = nn.Sequential(*block8) def forward(self, x): block1 = self.block1(x) block2 = self.block2(block1) block3 = self.block3(block2) block4 = self.block4(block3) block5 = self.block5(block4) block6 = self.block6(block5) block7 = self.block7(block6) block8 = self.block8(block1 + block7) return (torch.tanh(block8) + 1) / 2 """ def forward(self, input_0): primals_1 = self.e_conv1.weight primals_2 = self.e_conv1.bias primals_4 = self.e_conv2.weight primals_5 = self.e_conv2.bias primals_6 = self.e_conv3.weight primals_7 = self.e_conv3.bias primals_8 = self.e_conv4.weight primals_9 = self.e_conv4.bias primals_10 = self.e_conv5.weight primals_11 = self.e_conv5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
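Because GeneratorNew routes its own conv parameters into call(), the lowered path can be sanity-checked against the eager module. A sketch, assuming a CUDA device (call() allocates everything on cuda:0) and that the check shares one state dict between the two modules:

import torch

eager = Generator().cuda().eval()
lowered = GeneratorNew().cuda().eval()
lowered.load_state_dict(eager.state_dict())      # same five conv layers
x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(lowered(x), eager(x), rtol=1e-4, atol=1e-4)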
goldenbili/SRGAN_Test
Generator
false
10198
[ "MIT" ]
0
06705c92abd5b7084ae878a4746060760bcff5c3
https://github.com/goldenbili/SRGAN_Test/tree/06705c92abd5b7084ae878a4746060760bcff5c3
HLCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lm/clmt2msuhfklumqpp6nglkxr3w4zcfur55mrrpiel4yeric2fpc2.py # Topologically Sorted Source Nodes: [softmax, si, softmax_1, st, loss, loss_1], Original ATen: [aten._softmax, aten.sqrt, aten.mse_loss, aten.mul] # Source node to ATen node mapping: # loss => mean, pow_1, sub_2 # loss_1 => mul # si => sqrt # softmax => div, sum_1 # softmax_1 => div_1, sum_2 # st => sqrt_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%div,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%div_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sqrt, %sqrt_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_red_fused__softmax_mse_loss_mul_sqrt_1 = async_compile.triton('triton_red_fused__softmax_mse_loss_mul_sqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__softmax_mse_loss_mul_sqrt_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__softmax_mse_loss_mul_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp23 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.sqrt(tmp8) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp10 / tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tmp9 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = _tmp23 + tmp22 _tmp23 = tl.where(rmask, tmp24, _tmp23) tmp23 = tl.sum(_tmp23, 1)[:, None] tmp25 = 256.0 tmp26 = tmp23 / tmp25 tmp27 = 1.0 tmp28 = tmp26 * tmp27 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp28, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax, si, softmax_1, st, loss, loss_1], Original ATen: [aten._softmax, aten.sqrt, aten.mse_loss, aten.mul] triton_red_fused__softmax_mse_loss_mul_sqrt_1.run(buf4, buf0, buf1, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
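The lowering above splits softmax across the two kernels: the pointwise kernel stores only the numerically stable numerator exp(x - rowmax), and the reduction kernel re-reads each row of four to form the denominator while it accumulates the squared differences. A plain-PyTorch emulation of that split (illustrative shapes, not the generated code):

import torch

x = torch.randn(64, 4)
numer = (x - x.max(dim=-1, keepdim=True).values).exp()   # pointwise kernel
probs = numer / numer.sum(dim=-1, keepdim=True)          # reduction kernel
torch.testing.assert_close(probs, torch.softmax(x, dim=-1))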
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class HLCriterion(Criterion): def __init__(self, alpha=1.0, name='Hellinger Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1, reduction='batchmean'): """input/target: logits """ input = input.float() target = target.float() si = F.softmax(target.detach(), dim=-1, dtype=torch.float32).sqrt_() st = F.softmax(input.detach(), dim=-1, dtype=torch.float32).sqrt_() loss = F.mse_loss(si, st) loss = loss * self.alpha return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
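The loss here is the elementwise MSE between square-rooted softmax outputs, i.e. proportional to the squared Hellinger distance H^2(P, Q) = 1/2 * sum_i (sqrt(p_i) - sqrt(q_i))^2, except that F.mse_loss with the default reduction='mean' averages over every element rather than summing per distribution (hence the 1/256 factor in the fused reduction kernel). A quick check of that averaging semantics (tensor names are illustrative):

import torch
import torch.nn.functional as F

a, b = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
p = F.softmax(a, dim=-1).sqrt()
q = F.softmax(b, dim=-1).sqrt()
# reduction='mean' divides by all 4*4*4*4 = 256 elements.
torch.testing.assert_close(F.mse_loss(p, q), ((p - q) ** 2).mean())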
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_red_fused__softmax_mse_loss_mul_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp23 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.sqrt(tmp8) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp10 / tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tmp9 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = _tmp23 + tmp22 _tmp23 = tl.where(rmask, tmp24, _tmp23) tmp23 = tl.sum(_tmp23, 1)[:, None] tmp25 = 256.0 tmp26 = tmp23 / tmp25 tmp27 = 1.0 tmp28 = tmp26 * tmp27 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp28, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_red_fused__softmax_mse_loss_mul_sqrt_1[grid(1)](buf4, buf0, buf1, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del buf0 del buf1 return buf4, class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class HLCriterionNew(Criterion): def __init__(self, alpha=1.0, name='Hellinger Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
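A sketch of a parity check between the eager criterion and the lowered one; it assumes a CUDA device, since call() pins its buffers to cuda:0:

import torch

crit = HLCriterion()
lowered = HLCriterionNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(lowered(a, b), crit(a, b), rtol=1e-5, atol=1e-6)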
chunhuililili/mt_dnn
HLCriterion
false
10199
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
Cosine
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/65/c65fzntschmihaixi34nxrnc3ebvynor7gvbhgvxkbc5w2qsknbs.py # Topologically Sorted Source Nodes: [norm, norm_1, mul, add], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # norm => pow_1, pow_2, sum_1 # norm_1 => pow_3, pow_4, sum_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-09), kwargs = {}) triton_poi_fused_add_linalg_vector_norm_mul_1 = async_compile.triton('triton_poi_fused_add_linalg_vector_norm_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_linalg_vector_norm_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 
= tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tmp11 * tmp23 tmp25 = 1e-09 tmp26 = tmp24 + tmp25 tl.store(out_ptr0 + (x0), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bh/cbhnrrn4ctzyyoqx65k45oybf5e5pftynoyjwxgz6u3k34xuioxs.py # Topologically Sorted Source Nodes: [norm, norm_1, mul, add, truediv, squeeze], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add, aten.div, aten.squeeze] # Source node to ATen node mapping: # add => add # mul => mul # norm => pow_1, pow_2, sum_1 # norm_1 => pow_3, pow_4, sum_2 # squeeze => squeeze # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-09), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %add), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%div,), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [norm, norm_1, mul, add], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add] triton_poi_fused_add_linalg_vector_norm_mul_1.run(arg0_1, arg1_1, buf2, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [norm, norm_1, mul, add, truediv, squeeze], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.add, aten.div, aten.squeeze] triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0) del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch from torch.optim.lr_scheduler import * class Cosine(torch.nn.Module): def __init__(self, config): super().__init__() def forward(self, src, tgt): src = src.float() tgt = tgt.float() return (torch.matmul(src, tgt.transpose(2, 1)) / (src.norm(p=2, dim =-1, keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09) ).squeeze() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config()}]
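One subtlety in this module: both norm factors keep shape (..., L, 1), so entry [i, j] of the matmul is divided by ||src_i|| * ||tgt_i|| rather than ||src_i|| * ||tgt_j||, and only the diagonal is an exact cosine. A sketch checking that diagonal for the 3-D (batch, seq, hidden) layout the code reads most naturally with (shapes are illustrative; the benchmark actually feeds 4-D tensors):

import torch
import torch.nn.functional as F

src, tgt = torch.rand(2, 4, 8), torch.rand(2, 4, 8)
scores = torch.matmul(src, tgt.transpose(2, 1)) / (src.norm(p=2, dim=-1,
    keepdim=True) * tgt.norm(p=2, dim=-1, keepdim=True) + 1e-09)
diag = scores.diagonal(dim1=-2, dim2=-1)         # scores[b, i, i]
torch.testing.assert_close(diag, F.cosine_similarity(src, tgt, dim=-1),
    rtol=1e-4, atol=1e-6)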
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tmp11 * tmp23 tmp25 = 1e-09 tmp26 = tmp24 + tmp25 tl.store(out_ptr0 + x0, tmp26, xmask) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out =buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_linalg_vector_norm_mul_1[grid(64)](arg0_1, arg1_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 
4, 1), 0) del buf1 triton_poi_fused_add_div_linalg_vector_norm_mul_squeeze_2[grid(256)]( buf3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, class CosineNew(torch.nn.Module): def __init__(self, config): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
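The clone kernel in this lowering exists only to materialize tgt.transpose(2, 1) contiguously so extern_kernels.bmm can run on plain strided operands. An eager emulation of the buf0/buf1/bmm sequence for the benchmark's 4-D inputs (illustrative):

import torch

src, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
t = tgt.transpose(2, 1).contiguous()    # what triton_poi_fused_clone_0 writes
out = torch.bmm(src.reshape(16, 4, 4), t.reshape(16, 4, 4)).view(4, 4, 4, 4)
torch.testing.assert_close(out, torch.matmul(src, tgt.transpose(2, 1)))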
chunhuililili/mt_dnn
Cosine
false
10200
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
BiLinearSim
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src_], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_3, buf1, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf0 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
from torch.optim.lr_scheduler import *


class BiLinearSim(torch.nn.Module):

    def __init__(self, config):
        super().__init__()
        self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size,
            bias=False)

    def forward(self, src, tgt):
        src_ = self.linear(src)
        output = torch.matmul(src_, tgt.transpose(2, 1))
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4)}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.optim.lr_scheduler import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tl.store(out_ptr0 + x4, tmp0, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](primals_3, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
        del buf0
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0))


class BiLinearSimNew(torch.nn.Module):

    def __init__(self, config):
        super().__init__()
        self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size,
            bias=False)

    def forward(self, input_0, input_1):
        primals_1 = self.linear.weight
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
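A note on the clone kernel above, with a minimal sketch of my own (not part of the record): the load index x0 + 4 * x2 + 16 * x1 + 64 * x3 swaps the two middle dimensions, so the kernel simply materializes tgt.transpose(2, 1) contiguously before the batched matmul. Assuming a CUDA device and the imports above, this can be checked by launching the kernel the same way call() does:

tgt = torch.rand(4, 4, 4, 4, device='cuda')
buf = torch.empty_like(tgt)
# same launch configuration as in call() above
triton_poi_fused_clone_0[grid(256)](tgt, buf, 256, XBLOCK=128, num_warps=4,
    num_stages=1)
assert torch.equal(buf, tgt.transpose(2, 1).contiguous())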
chunhuililili/mt_dnn
BiLinearSim
false
10201
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
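A minimal usage sketch of my own (not part of the record): instantiate both modules from get_init_inputs, share the single weight, and compare the compiled BiLinearSimNew against the eager BiLinearSim on get_inputs-shaped CUDA tensors. Note that call() asserts exactly (4, 4, 4, 4) contiguous inputs, so only these shapes work; the tolerance is my assumption.

import torch
from _paritybench_helpers import _mock_config

config = _mock_config(hidden_size=4)
ref = BiLinearSim(config).cuda()
opt = BiLinearSimNew(config).cuda()
opt.linear.weight.data.copy_(ref.linear.weight.data)  # identical weights
src = torch.rand(4, 4, 4, 4, device='cuda')
tgt = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(src, tgt), opt(src, tgt), atol=1e-6)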
JSCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py # Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax] # Source node to ATen node mapping: # log_softmax_1 => amax_3, sub_5 # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_3), kwargs = {}) triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ew/cewejal4rpvwi6jnzulqnyb3ftv6rl774lzryrdhbo5q76xi6iai.py # Topologically Sorted Source Nodes: [softmax, softmax_1, m, m_1, kl_div, log_softmax, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.add, aten.mul, aten.xlogy, aten._log_softmax, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # kl_div => div_2, eq, full_default, full_default_1, isnan, log_1, mul_1, mul_2, sub_4, sum_4, where, where_1 # kl_div_1 => div_3, eq_1, full_default_2, full_default_3, isnan_1, log_3, mul_3, mul_4, sub_7, sum_6, where_2, where_3 # log_softmax => exp_2, log, sub_3, sum_3 # log_softmax_1 => exp_3, log_2, sub_6, sum_5 # loss => add_1 # loss_1 => mul_5 # m => add # m_1 => mul # softmax => div, sum_1 # softmax_1 => div_1, sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {}) # %mul : [num_users=10] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%mul,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%mul, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_2), kwargs = {}) # %where_1 : [num_users=1] = 
call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul_1), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_4,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%mul,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%mul, 0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log_3), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {}) # %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_5,), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1], True), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_5,), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %log_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub_6), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_3), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_7,), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, %div_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {}) triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1 = async_compile.triton('triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp46 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp10 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp32 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp35 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp48 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp49 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp51 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp54 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, 
eviction_policy='evict_last', other=0.0) tmp57 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tmp9 / tmp16 tmp18 = tmp8 + tmp17 tmp19 = 0.5 tmp20 = tmp18 * tmp19 tmp21 = libdevice.isnan(tmp20).to(tl.int1) tmp22 = 0.0 tmp23 = tmp20 == tmp22 tmp24 = tl_math.log(tmp20) tmp25 = tmp20 * tmp24 tmp26 = tl.where(tmp23, tmp22, tmp25) tmp27 = float("nan") tmp28 = tl.where(tmp21, tmp27, tmp26) tmp31 = tl_math.exp(tmp30) tmp33 = tl_math.exp(tmp32) tmp34 = tmp31 + tmp33 tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tmp39 = tl_math.exp(tmp38) tmp40 = tmp37 + tmp39 tmp41 = tl_math.log(tmp40) tmp42 = tmp29 - tmp41 tmp43 = tmp20 * tmp42 tmp44 = tmp28 - tmp43 tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = _tmp46 + tmp45 _tmp46 = tl.where(rmask, tmp47, _tmp46) tmp50 = tl_math.exp(tmp49) tmp52 = tl_math.exp(tmp51) tmp53 = tmp50 + tmp52 tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp60 = tl_math.log(tmp59) tmp61 = tmp48 - tmp60 tmp62 = tmp20 * tmp61 tmp63 = tmp28 - tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask, tmp66, _tmp65) tmp46 = tl.sum(_tmp46, 1)[:, None] tmp65 = tl.sum(_tmp65, 1)[:, None] tmp67 = 0.25 tmp68 = tmp46 * tmp67 tmp69 = tmp65 * tmp67 tmp70 = tmp68 + tmp69 tmp71 = 1.0 tmp72 = tmp70 * tmp71 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp72, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf5, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax_1, log_softmax], Original ATen: [aten._softmax, aten._log_softmax] triton_poi_fused__log_softmax__softmax_0.run(arg0_1, buf1, buf3, 256, grid=grid(256), stream=stream0) del arg0_1 buf4 = empty_strided_cuda((), (), torch.float32) buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [softmax, softmax_1, m, m_1, kl_div, log_softmax, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.add, aten.mul, aten.xlogy, aten._log_softmax, aten.sub, aten.sum, aten.div] triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1.run(buf7, buf0, buf1, buf3, buf5, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf1 del buf3 del buf5 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class JSCriterion(Criterion):

    def __init__(self, alpha=1.0, name='JS Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1,
            reduction='batchmean'):
        """input/target: logits
        """
        input = input.float()
        target = target.float()
        m = (F.softmax(target.detach(), dim=-1, dtype=torch.float32) +
            F.softmax(input.detach(), dim=-1, dtype=torch.float32))
        m = 0.5 * m
        loss = (F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
            m, reduction=reduction) +
            F.kl_div(F.log_softmax(target, dim=-1, dtype=torch.float32),
            m, reduction=reduction))
        loss = loss * self.alpha
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)


@triton.jit
def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1(
        in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel,
        XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp46 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = rindex // 4
        tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy='evict_last',
            other=0.0)
        tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp9 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp10 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp11 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp13 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp15 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp29 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp30 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp32 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp35 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp38 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp48 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp49 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp51 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp54 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp57 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp3 = tmp1 + tmp2
        tmp5 = tmp3 + tmp4
        tmp7 = tmp5 + tmp6
        tmp8 = tmp0 / tmp7
        tmp12 = tmp10 + tmp11
        tmp14 = tmp12 + tmp13
        tmp16 = tmp14 + tmp15
        tmp17 = tmp9 / tmp16
        tmp18 = tmp8 + tmp17
        tmp19 = 0.5
        tmp20 = tmp18 * tmp19
        tmp21 = libdevice.isnan(tmp20).to(tl.int1)
        tmp22 = 0.0
        tmp23 = tmp20 == tmp22
        tmp24 = tl_math.log(tmp20)
        tmp25 = tmp20 * tmp24
        tmp26 = tl.where(tmp23, tmp22, tmp25)
        tmp27 = float('nan')
        tmp28 = tl.where(tmp21, tmp27, tmp26)
        tmp31 = tl_math.exp(tmp30)
        tmp33 = tl_math.exp(tmp32)
        tmp34 = tmp31 + tmp33
        tmp36 = tl_math.exp(tmp35)
        tmp37 = tmp34 + tmp36
        tmp39 = tl_math.exp(tmp38)
        tmp40 = tmp37 + tmp39
        tmp41 = tl_math.log(tmp40)
        tmp42 = tmp29 - tmp41
        tmp43 = tmp20 * tmp42
        tmp44 = tmp28 - tmp43
        tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
        tmp47 = _tmp46 + tmp45
        _tmp46 = tl.where(rmask, tmp47, _tmp46)
        tmp50 = tl_math.exp(tmp49)
        tmp52 = tl_math.exp(tmp51)
        tmp53 = tmp50 + tmp52
        tmp55 = tl_math.exp(tmp54)
        tmp56 = tmp53 + tmp55
        tmp58 = tl_math.exp(tmp57)
        tmp59 = tmp56 + tmp58
        tmp60 = tl_math.log(tmp59)
        tmp61 = tmp48 - tmp60
        tmp62 = tmp20 * tmp61
        tmp63 = tmp28 - tmp62
        tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
        tmp66 = _tmp65 + tmp64
        _tmp65 = tl.where(rmask, tmp66, _tmp65)
    tmp46 = tl.sum(_tmp46, 1)[:, None]
    tmp65 = tl.sum(_tmp65, 1)[:, None]
    tmp67 = 0.25
    tmp68 = tmp46 * tmp67
    tmp69 = tmp65 * tmp67
    tmp70 = tmp68 + tmp69
    tmp71 = 1.0
    tmp72 = tmp70 * tmp71
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp72, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
            buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf1,
            buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf4
        del buf4
        triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_1[
            grid(1)](buf7, buf0, buf1, buf3, buf5, 1, 256, XBLOCK=1,
            RBLOCK=256, num_warps=8, num_stages=1)
        del buf0
        del buf1
        del buf3
        del buf5
    return buf7,


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class JSCriterionNew(Criterion):

    def __init__(self, alpha=1.0, name='JS Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
chunhuililili/mt_dnn
JSCriterion
false
10202
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
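A minimal usage sketch of my own (not part of the record). With reduction='batchmean' the eager forward computes the symmetric, JS-style divergence KL(m‖p) + KL(m‖q), where p and q are the two softmax distributions and m is their average; the single reduction kernel above fuses both KL terms into one pass, and its 0.25 factor is exactly the batchmean division by the leading dimension of 4. The criterion has no parameters, so the compiled variant can be checked directly; the tolerance is my assumption.

import torch

ref = JSCriterion()
opt = JSCriterionNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(a, b), opt(a, b), atol=1e-6)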
KlCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax_1, exp_1, sub_2 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/g5/cg5f2rptqnpi2mrqpqc4tujqpbrrrjrse6plhgftx425znsffpfv.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uf/cufuuxvelrltzwxjbbkikdu25p6qhp4n3izwy7jc75fb6567imxf.py # Topologically Sorted Source Nodes: [softmax, loss, 
log_softmax, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # loss => div_1, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # loss_1 => mul_2 # softmax => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 1.0), kwargs = {}) triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp36 = 0.25 tmp37 = tmp34 * tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) 
del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax, loss, log_softmax, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class KlCriterion(Criterion):

    def __init__(self, alpha=1.0, name='KL Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """input/target: logits
        """
        input = input.float()
        target = target.float()
        loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32),
            F.softmax(target, dim=-1, dtype=torch.float32),
            reduction='batchmean')
        loss = loss * self.alpha
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
        RBLOCK: tl.constexpr):
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = rindex // 4
        tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy='evict_last',
            other=0.0)
        tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp3 = tmp1 + tmp2
        tmp5 = tmp3 + tmp4
        tmp7 = tmp5 + tmp6
        tmp8 = tmp0 / tmp7
        tmp9 = libdevice.isnan(tmp8).to(tl.int1)
        tmp10 = 0.0
        tmp11 = tmp8 == tmp10
        tmp12 = tl_math.log(tmp8)
        tmp13 = tmp8 * tmp12
        tmp14 = tl.where(tmp11, tmp10, tmp13)
        tmp15 = float('nan')
        tmp16 = tl.where(tmp9, tmp15, tmp14)
        tmp19 = tl_math.exp(tmp18)
        tmp21 = tl_math.exp(tmp20)
        tmp22 = tmp19 + tmp21
        tmp24 = tl_math.exp(tmp23)
        tmp25 = tmp22 + tmp24
        tmp27 = tl_math.exp(tmp26)
        tmp28 = tmp25 + tmp27
        tmp29 = tl_math.log(tmp28)
        tmp30 = tmp17 - tmp29
        tmp31 = tmp8 * tmp30
        tmp32 = tmp16 - tmp31
        tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
        tmp35 = _tmp34 + tmp33
        _tmp34 = tl.where(rmask, tmp35, _tmp34)
    tmp34 = tl.sum(_tmp34, 1)[:, None]
    tmp36 = 0.25
    tmp37 = tmp34 * tmp36
    tmp38 = 1.0
    tmp39 = tmp37 * tmp38
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[
            grid(1)](buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256,
            num_warps=8, num_stages=1)
        del buf0
        del buf2
    return buf4,


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class KlCriterionNew(Criterion):

    def __init__(self, alpha=1.0, name='KL Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
chunhuililili/mt_dnn
KlCriterion
false
10203
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
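A minimal sketch of my own (not part of the record): KlCriterion with alpha=1.0 is plain batchmean KL divergence between softmax(target) and softmax(input) (the 0.25 in the reduction kernel is again the batchmean division by the leading dimension of 4), so the compiled KlCriterionNew can be checked against F.kl_div directly; the tolerance is my assumption.

import torch
import torch.nn.functional as F

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
eager = F.kl_div(F.log_softmax(a, dim=-1, dtype=torch.float32),
    F.softmax(b, dim=-1, dtype=torch.float32), reduction='batchmean')
assert torch.allclose(eager, KlCriterionNew()(a, b), atol=1e-6)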
Mnist_CNN
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/wr/cwrjr75ksmgrel4djhr27volr7bzoob6ixyduxm74y2ky6udtlzn.py
# Topologically Sorted Source Nodes: [conv2d, xb_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d => convolution
#   xb_1 => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 12544
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 196) % 16
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/iz/cizeqrrbvjbeoqr7ojtcvnagh6alsvquwz2qlmnzzbytbq77tnn4.py
# Topologically Sorted Source Nodes: [conv2d_1, xb_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d_1 => convolution_1
#   xb_2 => relu_1
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 3136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 49) % 16
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/23/c237kritlyjcuojdcyxxqqygn46332pp2fgszsmeqpe2chj5uwq2.py
# Topologically Sorted Source Nodes: [conv2d_2, xb_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
#   xb_3 => relu_2
# Graph fragment:
#   %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 640
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 10
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/rr/crrealymt6xnsywuzmxru2w3tcsoludjyewecl5ik2qqtmaogdt2.py
# Topologically Sorted Source Nodes: [xb_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   xb_4 => avg_pool2d
# Graph fragment:
#   %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%relu_2, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 40
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
    assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (16, ), (1, ))
    assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_5, (16, ), (1, ))
    assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_7, (10, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d, xb_1], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 12544, grid=grid(12544), stream=stream0)
        del primals_3
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1))
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1, xb_2], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 3136, grid=grid(3136), stream=stream0)
        del primals_5
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1))
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2, xb_3], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 640, grid=grid(640), stream=stream0)
        del primals_7
        buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [xb_4], Original ATen: [aten.avg_pool2d]
        triton_poi_fused_avg_pool2d_3.run(buf5, buf6, 40, grid=grid(40), stream=stream0)
    return (reinterpret_tensor(buf6, (4, 10), (10, 1), 0), primals_2, primals_4, primals_6, primals_1, buf1, buf3, buf5, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((10, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed


class Mnist_CNN(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, xb):
        xb = xb.view(-1, 1, 28, 28)
        xb = F.relu(self.conv1(xb))
        xb = F.relu(self.conv2(xb))
        xb = F.relu(self.conv3(xb))
        xb = F.avg_pool2d(xb, 4)
        return xb.view(-1, xb.size(1))


def get_inputs():
    return [torch.rand([4, 1, 28, 28])]


def get_init_inputs():
    return [[], {}]
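An illustrative sanity sketch (not part of the original row) showing where the hard-coded kernel sizes xnumel = 12544, 3136, 640 and 40 come from: each stride-2, padding-1, 3x3 conv halves the spatial size, so 28 -> 14 -> 7 -> 4 for a batch of 4.

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 28, 28)
x = F.relu(F.conv2d(x, torch.rand(16, 1, 3, 3), stride=2, padding=1))
assert x.shape == (4, 16, 14, 14) and x.numel() == 12544
x = F.relu(F.conv2d(x, torch.rand(16, 16, 3, 3), stride=2, padding=1))
assert x.shape == (4, 16, 7, 7) and x.numel() == 3136
x = F.relu(F.conv2d(x, torch.rand(10, 16, 3, 3), stride=2, padding=1))
assert x.shape == (4, 10, 4, 4) and x.numel() == 640
x = F.avg_pool2d(x, 4)  # 4x4 window averages with 1/16 = 0.0625, as in the kernel
assert x.shape == (4, 10, 1, 1) and x.numel() == 40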
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 12544
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 196 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 3136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 49 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 640
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 10
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 40
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + x0, tmp32, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
    assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_7, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(12544)](buf1, primals_3,
            12544, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_1[grid(3136)](buf3, primals_5,
            3136, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(640)](buf5, primals_7,
            640, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32)
        triton_poi_fused_avg_pool2d_3[grid(40)](buf5, buf6, 40, XBLOCK=64,
            num_warps=1, num_stages=1)
    return (reinterpret_tensor(buf6, (4, 10), (10, 1), 0), primals_2,
        primals_4, primals_6, primals_1, buf1, buf3, buf5)


class Mnist_CNNNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
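A minimal usage sketch (an assumption-laden illustration, not part of the row: it presumes a CUDA device and that both class definitions above are loaded into one session) checking that the fused path matches the eager module:

import torch

ref = Mnist_CNN().cuda()          # eager reference from the python_code field
fused = Mnist_CNNNew().cuda()     # generated wrapper from the triton_code field
fused.load_state_dict(ref.state_dict())  # share the same conv weights
x = torch.rand(4, 1, 28, 28, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(x), fused(x), atol=1e-5)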
hongsam123/PyTorch-tutorials-kr
Mnist_CNN
false
10,204
[ "BSD-3-Clause" ]
0
e48bbbc7088bf6b9da66abb8862b8d0539662bd5
https://github.com/hongsam123/PyTorch-tutorials-kr/tree/e48bbbc7088bf6b9da66abb8862b8d0539662bd5
Pooler
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   pooled_output => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/7b/c7bf34fgn2dhohe7ejneqlees25vyq6sbe4c5lfvoehzliak2nz6.py
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   pooled_output => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.add]
        triton_poi_fused_add_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
    return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
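An illustrative note (not source text) on the mm call above: reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) swaps the weight's strides, which is exactly a transpose, so the mm plus the bias-add kernel reproduce nn.Linear. A CPU sketch of the same identity:

import torch
import torch.nn.functional as F

x = torch.rand(16, 4)
w = torch.rand(4, 4)
b = torch.rand(4)
# stride-swapped view of w is w.t(); mm(x, w.t()) + b is F.linear(x, w, b)
assert torch.allclose(x @ w.t() + b, F.linear(x, w, b))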
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.optim.lr_scheduler import *


def linear(x):
    return x


def activation(func_a):
    """Activation function wrapper
    """
    try:
        f = eval(func_a)
    except:
        f = linear
    return f


class DropoutWrapper(nn.Module):
    """
    This is a dropout wrapper which supports the fix mask dropout
    """

    def __init__(self, dropout_p=0, enable_vbp=True):
        super(DropoutWrapper, self).__init__()
        """variational dropout means fix dropout mask
        ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
        """
        self.enable_variational_dropout = enable_vbp
        self.dropout_p = dropout_p

    def forward(self, x):
        """
        :param x: batch * len * input_size
        """
        if self.training is False or self.dropout_p == 0:
            return x
        if len(x.size()) == 3:
            mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
            mask.requires_grad = False
            return mask.unsqueeze(1).expand_as(x) * x
        else:
            return F.dropout(x, p=self.dropout_p, training=self.training)


class Pooler(nn.Module):

    def __init__(self, hidden_size, dropout_p=0.1, actf='tanh'):
        super(Pooler, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = activation(actf)
        self.dropout = DropoutWrapper(dropout_p=dropout_p)

    def forward(self, hidden_states):
        first_token_tensor = hidden_states[:, 0]
        first_token_tensor = self.dropout(first_token_tensor)
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
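An illustrative check (not from the repo) that the clone kernel's index arithmetic, x0 + 64 * x1 with x0 = xindex % 16, gathers exactly hidden_states[:, 0] for the (4, 4, 4, 4) input with strides (64, 16, 4, 1):

import torch

h = torch.rand(4, 4, 4, 4)
flat = h.reshape(-1)
idx = torch.arange(64)
# x1 = idx // 16 walks the batch; x0 = idx % 16 walks the 4x4 trailing dims
gathered = flat[(idx % 16) + 64 * (idx // 16)].reshape(4, 4, 4)
assert torch.equal(gathered, h[:, 0])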
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.optim.lr_scheduler import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_3
    return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0)


def linear(x):
    return x


def activation(func_a):
    """Activation function wrapper
    """
    try:
        f = eval(func_a)
    except:
        f = linear
    return f


class DropoutWrapper(nn.Module):
    """
    This is a dropout wrapper which supports the fix mask dropout
    """

    def __init__(self, dropout_p=0, enable_vbp=True):
        super(DropoutWrapper, self).__init__()
        """variational dropout means fix dropout mask
        ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
        """
        self.enable_variational_dropout = enable_vbp
        self.dropout_p = dropout_p

    def forward(self, x):
        """
        :param x: batch * len * input_size
        """
        if self.training is False or self.dropout_p == 0:
            return x
        if len(x.size()) == 3:
            mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
            mask.requires_grad = False
            return mask.unsqueeze(1).expand_as(x) * x
        else:
            return F.dropout(x, p=self.dropout_p, training=self.training)


class PoolerNew(nn.Module):

    def __init__(self, hidden_size, dropout_p=0.1, actf='tanh'):
        super(PoolerNew, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = activation(actf)
        self.dropout = DropoutWrapper(dropout_p=dropout_p)

    def forward(self, input_0):
        primals_2 = self.dense.weight
        primals_3 = self.dense.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
chunhuililili/mt_dnn
Pooler
false
10,205
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
NsKlCriterion
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   log_softmax => amax, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/wh/cwhvriz6hxksjnf7xkq6bdjc3fmcw4ahhhtmiub5rzoghqztmqjv.py
# Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, loss, loss_1], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   add_2 => add_2
#   add_3 => add_3
#   log => log_2
#   log_1 => log_3
#   log_softmax => exp, log, sub_1, sum_1
#   log_softmax_1 => exp_2, log_1, sub_3, sum_2
#   loss => div
#   loss_1 => mul_4
#   mul => mul_2
#   mul_1 => mul_3
#   p => exp_1
#   rp => neg
#   ry => neg_1
#   sub => sub_4
#   sub_1 => sub_5
#   sub_2 => sub_6
#   sum_1 => sum_3
#   truediv => mul, reciprocal
#   truediv_1 => mul_1, reciprocal_1
#   y => exp_3
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
#   %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1e-06), kwargs = {})
#   %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_4, 1e-06), kwargs = {})
#   %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_2,), kwargs = {})
#   %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
#   %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_1), kwargs = {})
#   %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_3, 1e-06), kwargs = {})
#   %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_2,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1.0), kwargs = {})
#   %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-06), kwargs = {})
#   %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_3,), kwargs = {})
#   %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg, %neg_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_1, %sub_6), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 2), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 64), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1 = async_compile.triton('triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = (rindex // 4)
        tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp25 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
        tmp26 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp28 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp31 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp34 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tl_math.exp(tmp1)
        tmp4 = tl_math.exp(tmp3)
        tmp5 = tmp2 + tmp4
        tmp7 = tl_math.exp(tmp6)
        tmp8 = tmp5 + tmp7
        tmp10 = tl_math.exp(tmp9)
        tmp11 = tmp8 + tmp10
        tmp12 = tl_math.log(tmp11)
        tmp13 = tmp0 - tmp12
        tmp14 = tl_math.exp(tmp13)
        tmp15 = 1e-06
        tmp16 = tmp14 + tmp15
        tmp17 = tl.full([1, 1], 1, tl.int32)
        tmp18 = tmp17 / tmp16
        tmp19 = 1.0
        tmp20 = tmp18 * tmp19
        tmp21 = tmp20 - tmp19
        tmp22 = tmp21 + tmp15
        tmp23 = tl_math.log(tmp22)
        tmp24 = -tmp23
        tmp27 = tl_math.exp(tmp26)
        tmp29 = tl_math.exp(tmp28)
        tmp30 = tmp27 + tmp29
        tmp32 = tl_math.exp(tmp31)
        tmp33 = tmp30 + tmp32
        tmp35 = tl_math.exp(tmp34)
        tmp36 = tmp33 + tmp35
        tmp37 = tl_math.log(tmp36)
        tmp38 = tmp25 - tmp37
        tmp39 = tl_math.exp(tmp38)
        tmp40 = tmp39 + tmp15
        tmp41 = tmp17 / tmp40
        tmp42 = tmp41 * tmp19
        tmp43 = tmp42 - tmp19
        tmp44 = tmp43 + tmp15
        tmp45 = tl_math.log(tmp44)
        tmp46 = -tmp45
        tmp47 = tmp24 - tmp46
        tmp48 = tmp14 * tmp47
        tmp49 = 2.0
        tmp50 = tmp48 * tmp49
        tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
        tmp53 = _tmp52 + tmp51
        _tmp52 = tl.where(rmask, tmp53, _tmp52)
    tmp52 = tl.sum(_tmp52, 1)[:, None]
    tmp54 = 0.015625
    tmp55 = tmp52 * tmp54
    tmp56 = 1.0
    tmp57 = tmp55 * tmp56
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp57, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [log_softmax_1], Original ATen: [aten._log_softmax]
        triton_poi_fused__log_softmax_0.run(arg1_1, buf2, 256, grid=grid(256), stream=stream0)
        del arg1_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, loss, loss_1], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div]
        triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1.run(buf5, buf0, buf2, 1, 256, grid=grid(1), stream=stream0)
        del buf0
        del buf2
    return (buf5, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *


def stable_kl(logit, target, epsilon=1e-06, reduce=True):
    logit = logit.view(-1, logit.size(-1)).float()
    target = target.view(-1, target.size(-1)).float()
    bs = logit.size(0)
    p = F.log_softmax(logit, 1).exp()
    y = F.log_softmax(target, 1).exp()
    rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
    ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
    if reduce:
        return (p * (rp - ry) * 2).sum() / bs
    else:
        return (p * (rp - ry) * 2).sum()


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class NsKlCriterion(Criterion):

    def __init__(self, alpha=1.0, name='KL Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """input/target: logits
        """
        input = input.float()
        target = target.float()
        loss = stable_kl(input, target.detach())
        loss = loss * self.alpha
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
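A small self-check (illustrative, not from the repo): when logit and target coincide, rp and ry are computed identically, so stable_kl of a tensor with itself is zero.

import torch

x = torch.rand(4, 4, 4, 4)
# p == y and rp == ry, so (p * (rp - ry) * 2).sum() / bs collapses to 0
assert torch.allclose(stable_kl(x, x), torch.tensor(0.0), atol=1e-6)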
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
        RBLOCK: tl.constexpr):
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = rindex // 4
        tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp25 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0)
        tmp26 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy='evict_last', other=0.0)
        tmp28 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp31 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp34 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tl_math.exp(tmp1)
        tmp4 = tl_math.exp(tmp3)
        tmp5 = tmp2 + tmp4
        tmp7 = tl_math.exp(tmp6)
        tmp8 = tmp5 + tmp7
        tmp10 = tl_math.exp(tmp9)
        tmp11 = tmp8 + tmp10
        tmp12 = tl_math.log(tmp11)
        tmp13 = tmp0 - tmp12
        tmp14 = tl_math.exp(tmp13)
        tmp15 = 1e-06
        tmp16 = tmp14 + tmp15
        tmp17 = tl.full([1, 1], 1, tl.int32)
        tmp18 = tmp17 / tmp16
        tmp19 = 1.0
        tmp20 = tmp18 * tmp19
        tmp21 = tmp20 - tmp19
        tmp22 = tmp21 + tmp15
        tmp23 = tl_math.log(tmp22)
        tmp24 = -tmp23
        tmp27 = tl_math.exp(tmp26)
        tmp29 = tl_math.exp(tmp28)
        tmp30 = tmp27 + tmp29
        tmp32 = tl_math.exp(tmp31)
        tmp33 = tmp30 + tmp32
        tmp35 = tl_math.exp(tmp34)
        tmp36 = tmp33 + tmp35
        tmp37 = tl_math.log(tmp36)
        tmp38 = tmp25 - tmp37
        tmp39 = tl_math.exp(tmp38)
        tmp40 = tmp39 + tmp15
        tmp41 = tmp17 / tmp40
        tmp42 = tmp41 * tmp19
        tmp43 = tmp42 - tmp19
        tmp44 = tmp43 + tmp15
        tmp45 = tl_math.log(tmp44)
        tmp46 = -tmp45
        tmp47 = tmp24 - tmp46
        tmp48 = tmp14 * tmp47
        tmp49 = 2.0
        tmp50 = tmp48 * tmp49
        tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
        tmp53 = _tmp52 + tmp51
        _tmp52 = tl.where(rmask, tmp53, _tmp52)
    tmp52 = tl.sum(_tmp52, 1)[:, None]
    tmp54 = 0.015625
    tmp55 = tmp52 * tmp54
    tmp56 = 1.0
    tmp57 = tmp55 * tmp56
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp57, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1[
            grid(1)](buf5, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256,
            num_warps=8, num_stages=1)
        del buf0
        del buf2
    return buf5,


def stable_kl(logit, target, epsilon=1e-06, reduce=True):
    logit = logit.view(-1, logit.size(-1)).float()
    target = target.view(-1, target.size(-1)).float()
    bs = logit.size(0)
    p = F.log_softmax(logit, 1).exp()
    y = F.log_softmax(target, 1).exp()
    rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
    ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
    if reduce:
        return (p * (rp - ry) * 2).sum() / bs
    else:
        return (p * (rp - ry) * 2).sum()


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class NsKlCriterionNew(Criterion):

    def __init__(self, alpha=1.0, name='KL Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
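One detail worth calling out (an illustrative observation, not source text): the literal 0.015625 baked into the reduction kernel is 1/64, i.e. 1/bs after stable_kl flattens the (4, 4, 4, 4) inputs into (64, 4) rows.

import torch

bs = torch.rand(4, 4, 4, 4).view(-1, 4).size(0)
assert bs == 64 and 1.0 / bs == 0.015625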
chunhuililili/mt_dnn
NsKlCriterion
false
10,206
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
CeCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4s/c4sinl6spiqxhpvyucixluedc7ia6rt4kmscnsrfq6ef6uymhfpa.py # Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # loss_1 => mul_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, 
xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
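A minimal CPU sketch of the two-kernel split above, assuming only plain PyTorch: kernel 0 materializes s = x - amax(x, dim=1) for numerical stability, and kernel 1 finishes log-softmax as s - log(sum(exp(s), dim=1)) before folding in the weighted negative-sum reduction.

import torch

x = torch.rand(4, 4, 4, 4)
s = x - x.amax(dim=1, keepdim=True)              # what triton_poi_fused__log_softmax_0 stores
ls = s - s.exp().sum(dim=1, keepdim=True).log()  # first half of the reduction kernel
assert torch.allclose(ls, torch.log_softmax(x, dim=1))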
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class CeCriterion(Criterion): def __init__(self, alpha=1.0, name='Cross Entropy Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ if weight is not None: loss = torch.mean(F.cross_entropy(input, target, reduction='none', ignore_index=ignore_index) * weight) else: loss = F.cross_entropy(input, target, ignore_index=ignore_index) loss = loss * self.alpha return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
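The hard-coded 0.015625 in the reduction kernel is 1/64: with (N, C, d1, d2) = (4, 4, 4, 4) inputs and same-shaped float targets, F.cross_entropy treats the targets as class probabilities (supported since roughly torch 1.10) and averages the per-position weighted negative log-softmax over the N*d1*d2 = 64 positions. A small sketch of that equivalence, assuming a recent torch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)  # logits
t = torch.rand(4, 4, 4, 4)  # probability-style targets
manual = -(F.log_softmax(x, dim=1) * t).sum() / 64  # the fused kernel's math
assert torch.allclose(manual, F.cross_entropy(x, t))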
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class CeCriterionNew(Criterion): def 
__init__(self, alpha=1.0, name='Cross Entropy Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chunhuililili/mt_dnn
CeCriterion
false
10,207
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
MseCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7p/c7po76azmxaenhqdhxmjkwmmyrh5jpr4p5zydgfurvfy3q5qm6gp.py # Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten.mse_loss, aten.mul] # Source node to ATen node mapping: # loss => mean, pow_1, sub # loss_1 => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False} ) @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten.mse_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class MseCriterion(Criterion): def __init__(self, alpha=1.0, name='MSE Regression Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ if weight is not None: loss = torch.mean(F.mse_loss(input.squeeze(), target, reduction='none') * weight.reshape((target.shape[0], 1))) else: loss = F.mse_loss(input.squeeze(), target) loss = loss * self.alpha return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
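A minimal sketch of what the single persistent-reduction kernel computes on the (4, 4, 4, 4) benchmark shapes: the sum of squared differences over all 256 elements divided by 256.0 (the `tmp7 = 256.0` constant), i.e. the default 'mean' reduction of F.mse_loss, then scaled by alpha = 1.0.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
t = torch.rand(4, 4, 4, 4)
manual = ((x - t) ** 2).sum() / 256          # the kernel's reduction
assert torch.allclose(manual, F.mse_loss(x.squeeze(), t))  # squeeze() is a no-op here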
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class MseCriterionNew(Criterion): def __init__(self, alpha=1.0, name='MSE Regression Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
chunhuililili/mt_dnn
MseCriterion
false
10,208
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
MultiheadAttentionWrapper
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/m5/cm5f2v4luzyczo3xcytmjg2h6fymvgarwaaqswwrj34olun37r6i.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %_scaled_dot_product_efficient_attention_default : [num_users=1] = call_function[target=torch.ops.aten._scaled_dot_product_efficient_attention.default](args = (%unsqueeze_default, %unsqueeze_default_1, %unsqueeze_default_2, None, False), kwargs = {scale: 1.0}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1)), 
tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(arg1_1, buf1, 64, grid=grid(64), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(arg2_1, buf2, 64, grid=grid(64), stream=stream0) del arg2_1 # Topologically Sorted Source Nodes: [], Original ATen: [] buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf0, buf1, buf2, None, False, scale=1.0) del buf0 del buf1 del buf2 buf4 = buf3[0] del buf3 return (reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
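A CPU sketch of what triton_poi_fused_0 materializes, under the layout read off the kernel's store indices: it writes the (seq, batch, dim) copy of a (batch, seq, dim) input, and call() then reads that buffer through a (1, batch, seq, dim) view with strides (64, 4, 16, 1), the layout handed to _scaled_dot_product_efficient_attention.

import torch

x = torch.rand(4, 4, 4)
buf = x.transpose(0, 1).contiguous()                 # memory order the kernel stores
view = buf.as_strided((1, 4, 4, 4), (64, 4, 16, 1))  # how call() exposes it to SDPA
assert torch.equal(view[0], x)                       # logically it is still the input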
import torch import torch.nn.functional as F import torch.nn as nn from torch.nn.utils import weight_norm from torch.optim.lr_scheduler import * def linear(x): return x def activation(func_a): """Activation function wrapper """ try: f = eval(func_a) except: f = linear return f class DropoutWrapper(nn.Module): """ This is a dropout wrapper which supports the fix mask dropout """ def __init__(self, dropout_p=0, enable_vbp=True): super(DropoutWrapper, self).__init__() """variational dropout means fix dropout mask ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11 """ self.enable_variational_dropout = enable_vbp self.dropout_p = dropout_p def forward(self, x): """ :param x: batch * len * input_size """ if self.training is False or self.dropout_p == 0: return x if len(x.size()) == 3: mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self. dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1)) mask.requires_grad = False return mask.unsqueeze(1).expand_as(x) * x else: return F.dropout(x, p=self.dropout_p, training=self.training) class MultiheadAttentionWrapper(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, query_dim, key_dim, value_dim, prefix='attention', opt={}, dropout=None): super().__init__() self.prefix = prefix self.num_heads = opt.get('{}_head'.format(self.prefix), 1) self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self. prefix), 0)) if dropout is None else dropout self.qkv_dim = [query_dim, key_dim, value_dim] assert query_dim == key_dim, 'query dim must equal with key dim' self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64) self.proj_on = opt.get('{}_proj_on'.format(prefix), False) self.share = opt.get('{}_share'.format(self.prefix), False) self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False) self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False) if self.proj_on: self.proj_modules = nn.ModuleList([nn.Linear(dim, self. hidden_size) for dim in self.qkv_dim[0:2]]) if self.layer_norm_on: for proj in self.proj_modules: proj = weight_norm(proj) if self.share and self.qkv_dim[0] == self.qkv_dim[1]: self.proj_modules[1] = self.proj_modules[0] self.f = activation(opt.get('{}_activation'.format(self.prefix), 'relu')) self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3 self.qkv_head_dim[2] = value_dim // self.num_heads assert self.qkv_head_dim[0 ] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads' assert self.qkv_head_dim[2 ] * self.num_heads == value_dim, 'value size must be divisible by num_heads' else: self.qkv_head_dim = [(emb // self.num_heads) for emb in self. 
qkv_dim] assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0 ], 'query size must be divisible by num_heads' assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1 ], 'key size must be divisible by num_heads' assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2 ], 'value size must be divisible by num_heads' if self.scale_on: self.scaling = self.qkv_head_dim[0] ** -0.5 self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix), False) self.output_size = self.qkv_dim[2] def forward(self, query, key, value, key_padding_mask=None): query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.qkv_dim[0] q, k, v = query, key, value if self.proj_on: if self.dropout: q, k = self.dropout(q), self.dropout(k) q, k = [self.f(proj(input)) for input, proj in zip([query, key], self.proj_modules)] src_len = k.size(0) if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.scale_on: q *= self.scaling q = q.contiguous().view(tgt_len, bsz * self.num_heads, self. qkv_head_dim[0]).transpose(0, 1) k = k.contiguous().view(src_len, bsz * self.num_heads, self. qkv_head_dim[1]).transpose(0, 1) v = v.contiguous().view(src_len, bsz * self.num_heads, self. qkv_head_dim[2]).transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.float().masked_fill(key_padding_mask .unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if self.drop_diagonal: assert attn_weights.size(1) == attn_weights.size(2) diag_mask = torch.diag(attn_weights.data.new(attn_weights.size( 1)).zero_() + 1).byte().unsqueeze(0).expand_as(attn_weights) attn_weights.data.masked_fill_(diag_mask, -float('inf')) attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as( attn_weights) attn_weights = self.dropout(attn_weights) attn = torch.bmm(attn_weights, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self. qkv_head_dim[2]] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, -1) attn = attn.transpose(0, 1) return attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'query_dim': 4, 'key_dim': 4, 'value_dim': 4}]
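With the default options exercised here (num_heads=1, proj_on=False, scale_on=False, no masks), the forward pass reduces to plain unscaled batched attention, which is why Inductor can lower the whole bmm/softmax/bmm graph to one fused SDPA call with scale=1.0. A CPU sketch of that equivalence, assuming a torch recent enough to accept the scale= keyword (roughly 2.1+):

import torch
import torch.nn.functional as F

q, k, v = (torch.rand(4, 4, 4) for _ in range(3))
manual = torch.bmm(F.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1), v)
fused = F.scaled_dot_product_attention(
    q.unsqueeze(0), k.unsqueeze(0), v.unsqueeze(0), scale=1.0).squeeze(0)
assert torch.allclose(manual, fused, atol=1e-6)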
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn from torch.nn.utils import weight_norm from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp0, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps =1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32) triton_poi_fused_0[grid(64)](arg1_1, buf1, 64, XBLOCK=64, num_warps =1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32) triton_poi_fused_0[grid(64)](arg2_1, buf2, 64, XBLOCK=64, num_warps =1, num_stages=1) del arg2_1 buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default( buf0, buf1, buf2, None, False, scale=1.0) del buf0 del buf1 del buf2 buf4 = buf3[0] del buf3 return reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0), def linear(x): return x def activation(func_a): """Activation function wrapper """ try: f = eval(func_a) except: f = linear return f class DropoutWrapper(nn.Module): """ This is a dropout wrapper which supports the fix mask dropout """ def __init__(self, dropout_p=0, enable_vbp=True): super(DropoutWrapper, self).__init__() """variational dropout means fix dropout mask ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11 """ self.enable_variational_dropout = enable_vbp self.dropout_p = dropout_p def forward(self, x): """ :param x: batch * len * input_size """ if self.training is False or self.dropout_p == 0: return x if len(x.size()) == 3: mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self. dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1)) mask.requires_grad = False return mask.unsqueeze(1).expand_as(x) * x else: return F.dropout(x, p=self.dropout_p, training=self.training) class MultiheadAttentionWrapperNew(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, query_dim, key_dim, value_dim, prefix='attention', opt={}, dropout=None): super().__init__() self.prefix = prefix self.num_heads = opt.get('{}_head'.format(self.prefix), 1) self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self. 
prefix), 0)) if dropout is None else dropout self.qkv_dim = [query_dim, key_dim, value_dim] assert query_dim == key_dim, 'query dim must equal with key dim' self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64) self.proj_on = opt.get('{}_proj_on'.format(prefix), False) self.share = opt.get('{}_share'.format(self.prefix), False) self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False) self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False) if self.proj_on: self.proj_modules = nn.ModuleList([nn.Linear(dim, self. hidden_size) for dim in self.qkv_dim[0:2]]) if self.layer_norm_on: for proj in self.proj_modules: proj = weight_norm(proj) if self.share and self.qkv_dim[0] == self.qkv_dim[1]: self.proj_modules[1] = self.proj_modules[0] self.f = activation(opt.get('{}_activation'.format(self.prefix), 'relu')) self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3 self.qkv_head_dim[2] = value_dim // self.num_heads assert self.qkv_head_dim[0 ] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads' assert self.qkv_head_dim[2 ] * self.num_heads == value_dim, 'value size must be divisible by num_heads' else: self.qkv_head_dim = [(emb // self.num_heads) for emb in self. qkv_dim] assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0 ], 'query size must be divisible by num_heads' assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1 ], 'key size must be divisible by num_heads' assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2 ], 'value size must be divisible by num_heads' if self.scale_on: self.scaling = self.qkv_head_dim[0] ** -0.5 self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix), False) self.output_size = self.qkv_dim[2] def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
chunhuililili/mt_dnn
MultiheadAttentionWrapper
false
10,209
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
Network
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # out => convolution # out_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, 
xmask) tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/fd/cfd3q6x464cl7vept5hkzz2av6ogtk5up7qocwljfaq5nbponjso.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bq/cbqph7b6z5pkvjbyl5xcehcmtf5gj4cge7to6mumw5i6vt6xp2db.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # out_5 => relu_1 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kd/ckdg574tplsphiaxjgd3mlzczq6o6slvwdi62ykbk6k52i3tdbov.py # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.relu] # Source node to ATen node mapping: # out_6 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wf/cwf7idkvctcz726nblespeq5hl4v2rgvawdqwncw2xagtfwrsrfi.py # Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_7 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) 
assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (120, 1600), (1600, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (4, 1600), (1600, 1), 0), reinterpret_tensor(primals_6, (1600, 120), (1, 1600), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu] triton_poi_fused_relu_3.run(buf7, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf8 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf9, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), out=buf10) buf11 = buf10; del buf10 # reuse buf12 = empty_strided_cuda((4, 10), (10, 1), torch.bool) # Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_5.run(buf11, primals_11, buf12, 40, grid=grid(40), stream=stream0) del primals_11 return (buf11, primals_1, primals_3, primals_4, buf1, buf2, buf3, reinterpret_tensor(buf5, (4, 1600), (1600, 1), 0), buf7, buf9, buf12, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 1600), (1600, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
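A short sketch of why the last kernel has two outputs: alongside the final ReLU activation it stores the boolean (out <= 0) mask (buf12), which the '0_forward' graph returns among the saved tensors so that aten.threshold_backward can zero incoming gradients without recomputing the forward.

import torch

x = torch.randn(4, 10)
out = torch.relu(x)
mask = out <= 0                                   # what buf12 holds
grad_out = torch.randn_like(out)
grad_in = grad_out.masked_fill(mask, 0.0)         # threshold_backward's use of the mask
assert (grad_in[mask] == 0).all()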
import torch import torch.nn as nn import torch.nn.functional as F class Network(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 10 * 10, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): out = self.conv1(x) out = F.relu(out) out = self.pool(out) out = self.conv2(out) out = out.view(-1, 16 * 10 * 10) out = F.relu(self.fc1(out)) out = F.relu(self.fc2(out)) out = F.relu(self.fc3(out)) return out def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
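A shape walk-through for the hard-coded flatten size 16 * 10 * 10 in fc1: 32 -(5x5 conv)-> 28 -(2x2 max pool)-> 14 -(5x5 conv)-> 10. Note the source forward pools only once, matching the single max_pool2d kernel above. A minimal check with the same layer shapes:

import torch
import torch.nn as nn

conv1, pool, conv2 = nn.Conv2d(3, 6, 5), nn.MaxPool2d(2, 2), nn.Conv2d(6, 16, 5)
feats = conv2(pool(torch.relu(conv1(torch.rand(4, 3, 32, 32)))))
assert feats.shape == (4, 16, 10, 10)             # flattens to (4, 1600) for fc1
assert feats.view(-1, 16 * 10 * 10).shape == (4, 1600)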
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, 
tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 1600), (1600, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (4, 1600), (1600, 1), 0), reinterpret_tensor(primals_6, (1600, 120), (1, 1600), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_3[grid(480)](buf7, primals_7, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf8 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(336)](buf9, primals_9, 336, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), out=buf10) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(40)](buf11, primals_11, buf12, 40, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 return (buf11, primals_1, primals_3, primals_4, buf1, buf2, buf3, reinterpret_tensor(buf5, (4, 1600), (1600, 1), 0), buf7, buf9, buf12, primals_10, primals_8, primals_6) class 
NetworkNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 10 * 10, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
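A hedged equivalence check (my addition, assuming a CUDA device is available): NetworkNew only rewires the same nn.Module parameters through call(), so with shared weights it should reproduce Network's eager forward pass up to floating-point tolerance.

import torch

if torch.cuda.is_available():
    ref = Network().cuda().eval()
    fused = NetworkNew().cuda().eval()
    fused.load_state_dict(ref.state_dict())  # identical parameter names
    x = torch.rand(4, 3, 32, 32, device='cuda')
    with torch.no_grad():
        torch.testing.assert_close(ref(x), fused(x), rtol=1e-4, atol=1e-4)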
ibrahimalmakky/py4ai
Network
false
10210
[ "MIT" ]
0
224f54086523314ff9c7133680f119c62f6ea249
https://github.com/ibrahimalmakky/py4ai/tree/224f54086523314ff9c7133680f119c62f6ea249
ComplexConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/hu/chuyhfal6unm2glvnv2o25jncg7prpzpoibqrpgzq2pm2ai5ww6k.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] # Source node to ATen node mapping: # output => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %add], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 
((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr3 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tmp7 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tmp15 = tl.full([1], 8, tl.int64) tmp16 = tmp0 < tmp15 tmp17 = tl.load(in_ptr0 + ((4*x1) + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + ((-4) + x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.load(in_ptr3 + ((-4) + x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp14, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp13, tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 8, 4), (32, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1), (4, 1, 1)) # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 16), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) buf2 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_3, buf1, primals_5, buf2, 32, grid=grid(32), stream=stream0) del buf0 del buf1 del primals_3 del primals_5 return (buf2, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 0), reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 16), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 8, 4), (32, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import 
compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ComplexConv(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(ComplexConv, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.padding = padding self.conv_re = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) self.conv_im = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) def forward(self, x): n = x.size()[1] m = int(n / 2) x_real = x[:, :m] x_imag = x[:, m:] real = self.conv_re(x_real) - self.conv_im(x_imag) imaginary = self.conv_re(x_real) + self.conv_im(x_imag) output = torch.cat((real, imaginary), dim=1) return output def get_inputs(): return [torch.rand([4, 8, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
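Two details worth flagging (my addition): the module packs real and imaginary parts along dim=1, so in_channel=4 implies 8 physical input channels, and the imaginary branch computes conv_re(x_real) + conv_im(x_imag) rather than the textbook conv_re(x_imag) + conv_im(x_real); the fused cat kernel reproduces the repo's variant as written. A minimal usage sketch under those assumptions:

import torch

conv = ComplexConv(in_channel=4, out_channel=4, kernel_size=4)
x = torch.rand(4, 8, 4)        # batch=4, 2*in_channel=8, length=4
y = conv(x)
assert y.shape == (4, 8, 1)    # real and imaginary halves concatenated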
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tl.load(in_ptr3 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tmp7 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp17 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + (-4 + x0), tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.load(in_ptr3 + (-4 + x0), tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp14, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp13, tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 8, 4), (32, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1), (4, 1, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 16), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) buf2 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](buf0, primals_3, buf1, primals_5, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf0 del buf1 del primals_3 del primals_5 return buf2, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 0), reinterpret_tensor(primals_1, (4, 4, 4), (32, 4, 1), 16) class ComplexConvNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(ComplexConvNew, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.padding = 
padding self.conv_re = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) self.conv_im = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) def forward(self, input_0): primals_2 = self.conv_re.weight primals_3 = self.conv_re.bias primals_4 = self.conv_im.weight primals_5 = self.conv_im.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
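An equivalence sketch (my addition, CUDA assumed): call() feeds the same conv_re/conv_im weights through two extern convolutions and triton_poi_fused_cat_0, so the compiled module should agree with the eager one.

import torch

if torch.cuda.is_available():
    eager = ComplexConv(4, 4, 4).cuda()
    fused = ComplexConvNew(4, 4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # same parameter names
    x = torch.rand(4, 8, 4, device='cuda')
    torch.testing.assert_close(eager(x), fused(x), rtol=1e-5, atol=1e-5)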
iseeklin/Electromagnetic-Signal-Recognition-Using-Deep-Learning
ComplexConv
false
10211
[ "Apache-2.0" ]
0
be78a2d966f33fd90567b21295cda1c1d472e14a
https://github.com/iseeklin/Electromagnetic-Signal-Recognition-Using-Deep-Learning/tree/be78a2d966f33fd90567b21295cda1c1d472e14a
NsSymKlCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/m3/cm3oyfplxv4jfq4lzivy2ytiixh7r3jdutou2fi3p7bnl5g734sl.py # Topologically Sorted Source Nodes: [log_softmax, log_softmax_3], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # log_softmax_3 => amax_3, sub_9 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax_3), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/dc/cdcsseiicjcgc6sqlyyouett3fn2xszalsbnt3viybno45khl7l3.py # Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, truediv_2, log_softmax_2, p_1, add_4, truediv_3, sub_3, add_5, log_2, rp_1, log_softmax_3, y_1, add_6, truediv_4, sub_4, add_7, log_3, ry_1, sub_5, mul_2, mul_3, sum_2, truediv_5, loss, loss_1], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # add_7 => add_7 # log => log_2 # log_1 => log_3 # log_2 => log_6 # log_3 => log_7 # log_softmax => exp, log, sub_1, sum_1 # log_softmax_1 => exp_2, log_1, sub_3, sum_2 # log_softmax_2 => exp_4, log_4, sub_8, sum_4 # log_softmax_3 => exp_6, log_5, sub_10, sum_5 # loss => add_8 # loss_1 => mul_8 # mul => mul_2 # mul_1 => mul_3 # mul_2 => mul_6 # mul_3 => mul_7 # p => exp_1 # p_1 => exp_5 # rp => neg # rp_1 => neg_2 # ry => neg_1 # ry_1 => neg_3 # sub => sub_4 # sub_1 => sub_5 # sub_2 => sub_6 # sub_3 => sub_11 # sub_4 => sub_12 # sub_5 => sub_13 # sum_1 => sum_3 # sum_2 => sum_6 # truediv => mul, reciprocal # truediv_1 => mul_1, reciprocal_1 # truediv_2 => div # truediv_3 => mul_4, reciprocal_2 # truediv_4 => mul_5, reciprocal_3 # truediv_5 => div_1 # y => exp_3 # y_1 => exp_7 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1e-06), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_4, 1e-06), kwargs = {}) # %log_2 : [num_users=1] = 
call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_2,), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_1), kwargs = {}) # %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_3, 1e-06), kwargs = {}) # %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1.0), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-06), kwargs = {}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_3,), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg, %neg_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_1, %sub_6), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 64), kwargs = {}) # %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_7,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [1], True), kwargs = {}) # %log_4 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_7, %log_4), kwargs = {}) # %exp_5 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_8,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_5, 1e-06), kwargs = {}) # %reciprocal_2 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_4,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_2, 1.0), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, 1), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_11, 1e-06), kwargs = {}) # %log_6 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_5,), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_6,), kwargs = {}) # %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_9,), kwargs = {}) # %sum_5 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [1], True), kwargs = {}) # %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_5,), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_9, %log_5), kwargs = {}) # %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_7, 1e-06), kwargs = {}) # %reciprocal_3 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_6,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_3, 1.0), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 1), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_12, 1e-06), kwargs = {}) # %log_7 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_7,), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_7,), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %neg_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_5, %sub_13), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, 2), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_7,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 64), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 1.0), kwargs = {}) triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1 = async_compile.triton('triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp102 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp26 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp34 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp54 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp55 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp63 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp79 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = tl.full([1, 1], 1, tl.int32) tmp18 = tmp17 / tmp16 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tmp21 = tmp20 - tmp19 tmp22 = tmp21 + tmp15 tmp23 = tl_math.log(tmp22) tmp24 = -tmp23 tmp27 = tl_math.exp(tmp26) tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tl_math.log(tmp36) tmp38 = tmp25 - tmp37 tmp39 = tl_math.exp(tmp38) tmp40 = tmp39 + tmp15 tmp41 = tmp17 / tmp40 tmp42 = tmp41 * tmp19 tmp43 = tmp42 - tmp19 tmp44 = tmp43 + tmp15 tmp45 = tl_math.log(tmp44) tmp46 = -tmp45 tmp47 = tmp24 - tmp46 tmp48 = tmp14 * tmp47 tmp49 = 2.0 tmp50 = tmp48 * tmp49 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = _tmp52 + tmp51 _tmp52 = tl.where(rmask, tmp53, _tmp52) tmp56 = tl_math.exp(tmp55) tmp58 = tl_math.exp(tmp57) 
tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp64 = tl_math.exp(tmp63) tmp65 = tmp62 + tmp64 tmp66 = tl_math.log(tmp65) tmp67 = tmp54 - tmp66 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 + tmp15 tmp70 = tmp17 / tmp69 tmp71 = tmp70 * tmp19 tmp72 = tmp71 - tmp19 tmp73 = tmp72 + tmp15 tmp74 = tl_math.log(tmp73) tmp75 = -tmp74 tmp78 = tl_math.exp(tmp77) tmp80 = tl_math.exp(tmp79) tmp81 = tmp78 + tmp80 tmp83 = tl_math.exp(tmp82) tmp84 = tmp81 + tmp83 tmp86 = tl_math.exp(tmp85) tmp87 = tmp84 + tmp86 tmp88 = tl_math.log(tmp87) tmp89 = tmp76 - tmp88 tmp90 = tl_math.exp(tmp89) tmp91 = tmp90 + tmp15 tmp92 = tmp17 / tmp91 tmp93 = tmp92 * tmp19 tmp94 = tmp93 - tmp19 tmp95 = tmp94 + tmp15 tmp96 = tl_math.log(tmp95) tmp97 = -tmp96 tmp98 = tmp75 - tmp97 tmp99 = tmp68 * tmp98 tmp100 = tmp99 * tmp49 tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK]) tmp103 = _tmp102 + tmp101 _tmp102 = tl.where(rmask, tmp103, _tmp102) tmp52 = tl.sum(_tmp52, 1)[:, None] tmp102 = tl.sum(_tmp102, 1)[:, None] tmp104 = 0.015625 tmp105 = tmp52 * tmp104 tmp106 = tmp102 * tmp104 tmp107 = tmp105 + tmp106 tmp108 = 1.0 tmp109 = tmp107 * tmp108 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp109, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax, log_softmax_3], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, buf7, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax_1, log_softmax_2], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_0.run(arg1_1, buf2, buf5, 256, grid=grid(256), stream=stream0) del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf10 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, truediv_2, log_softmax_2, p_1, add_4, truediv_3, sub_3, add_5, log_2, rp_1, log_softmax_3, y_1, add_6, truediv_4, sub_4, add_7, log_3, ry_1, sub_5, mul_2, mul_3, sum_2, truediv_5, loss, loss_1], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div] triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1.run(buf10, buf0, buf2, buf5, buf7, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 del buf5 del buf7 return (buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * def stable_kl(logit, target, epsilon=1e-06, reduce=True): logit = logit.view(-1, logit.size(-1)).float() target = target.view(-1, target.size(-1)).float() bs = logit.size(0) p = F.log_softmax(logit, 1).exp() y = F.log_softmax(target, 1).exp() rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log() ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log() if reduce: return (p * (rp - ry) * 2).sum() / bs else: return (p * (rp - ry) * 2).sum() class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class NsSymKlCriterion(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """input/target: logits """ input = input.float() target = target.float() loss = stable_kl(input, target.detach()) + stable_kl(target, input. detach()) loss = loss * self.alpha return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
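A note on the arithmetic (my addition): for small epsilon, 1/(p + eps) - 1 + eps ≈ (1 - p)/p, so rp ≈ log(p / (1 - p)), the log-odds of p, and stable_kl accumulates p * (logit(p) - logit(y)) * 2 / bs, a logit-space surrogate for KL divergence; the .detach() before .log() keeps gradients flowing only through the leading p factor. A CPU smoke test under those assumptions:

import torch

crit = NsSymKlCriterion(alpha=1.0)
x, y = get_inputs()             # two (4, 4, 4, 4) logit tensors
loss = crit(x, y)               # symmetrized: input and target swapped
assert loss.ndim == 0 and torch.isfinite(loss)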
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) @triton.jit def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp102 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp26 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp28 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp34 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp54 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp55 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp60 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp63 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp76 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp79 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp82 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', 
other=0.0) tmp85 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = tl.full([1, 1], 1, tl.int32) tmp18 = tmp17 / tmp16 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tmp21 = tmp20 - tmp19 tmp22 = tmp21 + tmp15 tmp23 = tl_math.log(tmp22) tmp24 = -tmp23 tmp27 = tl_math.exp(tmp26) tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tl_math.log(tmp36) tmp38 = tmp25 - tmp37 tmp39 = tl_math.exp(tmp38) tmp40 = tmp39 + tmp15 tmp41 = tmp17 / tmp40 tmp42 = tmp41 * tmp19 tmp43 = tmp42 - tmp19 tmp44 = tmp43 + tmp15 tmp45 = tl_math.log(tmp44) tmp46 = -tmp45 tmp47 = tmp24 - tmp46 tmp48 = tmp14 * tmp47 tmp49 = 2.0 tmp50 = tmp48 * tmp49 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = _tmp52 + tmp51 _tmp52 = tl.where(rmask, tmp53, _tmp52) tmp56 = tl_math.exp(tmp55) tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp64 = tl_math.exp(tmp63) tmp65 = tmp62 + tmp64 tmp66 = tl_math.log(tmp65) tmp67 = tmp54 - tmp66 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 + tmp15 tmp70 = tmp17 / tmp69 tmp71 = tmp70 * tmp19 tmp72 = tmp71 - tmp19 tmp73 = tmp72 + tmp15 tmp74 = tl_math.log(tmp73) tmp75 = -tmp74 tmp78 = tl_math.exp(tmp77) tmp80 = tl_math.exp(tmp79) tmp81 = tmp78 + tmp80 tmp83 = tl_math.exp(tmp82) tmp84 = tmp81 + tmp83 tmp86 = tl_math.exp(tmp85) tmp87 = tmp84 + tmp86 tmp88 = tl_math.log(tmp87) tmp89 = tmp76 - tmp88 tmp90 = tl_math.exp(tmp89) tmp91 = tmp90 + tmp15 tmp92 = tmp17 / tmp91 tmp93 = tmp92 * tmp19 tmp94 = tmp93 - tmp19 tmp95 = tmp94 + tmp15 tmp96 = tl_math.log(tmp95) tmp97 = -tmp96 tmp98 = tmp75 - tmp97 tmp99 = tmp68 * tmp98 tmp100 = tmp99 * tmp49 tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK]) tmp103 = _tmp102 + tmp101 _tmp102 = tl.where(rmask, tmp103, _tmp102) tmp52 = tl.sum(_tmp52, 1)[:, None] tmp102 = tl.sum(_tmp102, 1)[:, None] tmp104 = 0.015625 tmp105 = tmp52 * tmp104 tmp106 = tmp102 * tmp104 tmp107 = tmp105 + tmp106 tmp108 = 1.0 tmp109 = tmp107 * tmp108 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp109, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf10 = buf4 del buf4 triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1[ grid(1)](buf10, buf0, buf2, buf5, buf7, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del buf0 del buf2 del buf5 del buf7 return buf10, def stable_kl(logit, target, epsilon=1e-06, reduce=True): logit = logit.view(-1, logit.size(-1)).float() target = 
target.view(-1, target.size(-1)).float() bs = logit.size(0) p = F.log_softmax(logit, 1).exp() y = F.log_softmax(target, 1).exp() rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log() ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log() if reduce: return (p * (rp - ry) * 2).sum() / bs else: return (p * (rp - ry) * 2).sum() class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class NsSymKlCriterionNew(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
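A shape caveat plus equivalence sketch (my addition, CUDA assumed): the reduction kernel hardcodes the (4, 4, 4, 4) problem size (rnumel = 256 and the 0.015625 = 1/64 scaling), so the compiled criterion is only valid for the shapes produced by get_inputs(); for those it should match the eager version.

import torch

if torch.cuda.is_available():
    a, b = (t.cuda() for t in get_inputs())
    eager = NsSymKlCriterion()(a, b)
    fused = NsSymKlCriterionNew()(a, b)
    torch.testing.assert_close(eager, fused, rtol=1e-4, atol=1e-4)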
chunhuililili/mt_dnn
NsSymKlCriterion
false
10212
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
Pooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ji/cji4hxxpm6hsn75kd2crjzu4er5k356aqcslauo4gbevbrw7m2ca.py # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.avg_pool2d, aten.add] # Source node to ATen node mapping: # x => avg_pool2d # x_2 => add # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, 0), kwargs = {}) triton_poi_fused_add_avg_pool2d_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool2d_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), 
xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tmp33 = 0.0 tmp34 = tmp32 + tmp33 tl.store(in_out_ptr0 + (x0), tmp34, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.avg_pool2d, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool2d_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Pooling(nn.Module): def __init__(self, pooling_type=['GAP']): super(Pooling, self).__init__() self.pooling = [] for method in pooling_type: if method == 'GAP': self.pooling.append(F.avg_pool2d) elif method == 'GMP': self.pooling.append(F.max_pool2d) def forward(self, input_tensor): adaptiveAvgPoolWidth = input_tensor.shape[2] x_list = [] for pooling in self.pooling: x = pooling(input_tensor, kernel_size=adaptiveAvgPoolWidth) x = x.view(x.size(0), -1) x_list.append(x) x = sum(x_list) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
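A usage sketch (my addition): with the default pooling_type=['GAP'] the module reduces to global average pooling plus a flatten, so each output entry is a per-channel spatial mean.

import torch

pool = Pooling()               # default ['GAP']
x = torch.rand(4, 4, 4, 4)
y = pool(x)
assert y.shape == (4, 4)
torch.testing.assert_close(y, x.mean(dim=(2, 3)))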
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_avg_pool2d_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tmp33 = 0.0 tmp34 = tmp32 + tmp33 tl.store(in_out_ptr0 + x0, tmp34, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_avg_pool2d_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class PoolingNew(nn.Module): def __init__(self, pooling_type=['GAP']): super(PoolingNew, self).__init__() self.pooling = [] for method in pooling_type: if method == 'GAP': self.pooling.append(F.avg_pool2d) elif method == 'GMP': self.pooling.append(F.max_pool2d) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
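An equivalence sketch (my addition, CUDA assumed): triton_poi_fused_add_avg_pool2d_0 folds the 16-element spatial sum, the 1/16 average (the 0.0625 constant) and the trailing '+ 0.0' left over from sum([x]) into one kernel, so PoolingNew should match the eager Pooling on (4, 4, 4, 4) inputs.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(Pooling()(x), PoolingNew()(x))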
heebinYoo/proxy-synthesis-confidence-control-new
Pooling
false
10,213
[ "Apache-2.0" ]
0
c591cdffc30cf933bd242ba5646d2436a42a3181
https://github.com/heebinYoo/proxy-synthesis-confidence-control-new/tree/c591cdffc30cf933bd242ba5646d2436a42a3181
SymKlCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py # Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax] # Source node to ATen node mapping: # log_softmax_1 => amax_2, sub_4 # softmax => amax_1, exp_1, sub_2 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_2), kwargs = {}) triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7m/c7m62lqyfgv73n2cb7qnzr2xodmurvv7yy6z2m7nhu5m3pfdjjxe.py # Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # softmax_1 => amax_3, exp_3, sub_6 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {}) triton_poi_fused__log_softmax__softmax_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, 
xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7u/c7udesn7p5qs3jrwgyf3hzg46tjjmy4azf3ycyya63euoc5gcym5.py # Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div, aten.add] # Source node to ATen node mapping: # kl_div => div_1, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # kl_div_1 => div_3, eq_1, full_default_2, full_default_3, isnan_1, log_3, mul_2, mul_3, sub_7, sum_6, where_2, where_3 # log_softmax => exp, log, sub_1, sum_1 # log_softmax_1 => exp_2, log_2, sub_5, sum_4 # loss => add # loss_1 => mul_4 # softmax => div, sum_2 # softmax_1 => div_2, sum_5 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1], True), kwargs = {}) # %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_5), kwargs = {}) # %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_3), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1], True), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_5), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_7,), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %div_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {}) triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 
'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp37 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp51 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp52 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp54 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 
tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp39 = tmp37 + tmp38 tmp41 = tmp39 + tmp40 tmp43 = tmp41 + tmp42 tmp44 = tmp36 / tmp43 tmp45 = libdevice.isnan(tmp44).to(tl.int1) tmp46 = tmp44 == tmp10 tmp47 = tl_math.log(tmp44) tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp10, tmp48) tmp50 = tl.where(tmp45, tmp15, tmp49) tmp53 = tl_math.exp(tmp52) tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp63 = tl_math.log(tmp62) tmp64 = tmp51 - tmp63 tmp65 = tmp44 * tmp64 tmp66 = tmp50 - tmp65 tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = _tmp68 + tmp67 _tmp68 = tl.where(rmask, tmp69, _tmp68) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp68 = tl.sum(_tmp68, 1)[:, None] tmp70 = 0.25 tmp71 = tmp34 * tmp70 tmp72 = tmp68 * tmp70 tmp73 = tmp71 + tmp72 tmp74 = 1.0 tmp75 = tmp73 * tmp74 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp75, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf6, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax] triton_poi_fused__log_softmax__softmax_1.run(arg0_1, buf2, buf4, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div, aten.add] triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2.run(buf8, buf0, buf2, buf4, buf6, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 del buf4 del buf6 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
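The 0.25 constant near the end of the reduction kernel is the 'batchmean' normalization: F.kl_div with reduction='batchmean' divides the pointwise KL sum by input.size(0), which is 4 for these (4, 4, 4, 4) tensors. A minimal sketch of that identity (all names here are ours):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 4)
target = torch.randn(4, 4, 4, 4)
log_p = F.log_softmax(logits, dim=-1)
q = F.softmax(target, dim=-1)
batchmean = F.kl_div(log_p, q, reduction='batchmean')
manual = F.kl_div(log_p, q, reduction='sum') / logits.size(0)  # sum / 4
assert torch.allclose(batchmean, manual)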
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): """Alpha is used to weight each loss term """ super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class SymKlCriterion(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1, reduction='batchmean'): """input/target: logits """ input = input.float() target = target.float() loss = F.kl_div(F.log_softmax(input, dim=-1, dtype=torch.float32), F.softmax(target.detach(), dim=-1, dtype=torch.float32), reduction=reduction) + F.kl_div(F.log_softmax(target, dim=-1, dtype=torch.float32), F.softmax(input.detach(), dim=-1, dtype=torch.float32), reduction=reduction) loss = loss * self.alpha return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
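In F.kl_div's convention (first argument log-probabilities, second probabilities), the loss above is the symmetrized divergence KL(Q || P) + KL(P || Q) with P = softmax(input) and Q = softmax(target), each term averaged over the batch dimension. A sketch that recomputes it by hand (kl, a, b are our names; SymKlCriterion is the class above):

import torch
import torch.nn.functional as F

a, b = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
p, q = F.softmax(a, dim=-1), F.softmax(b, dim=-1)

def kl(t, s):
    # KL(t || s), summed over all elements and divided by the batch size
    return (t * (t.log() - s.log())).sum() / a.size(0)

manual = kl(q, p) + kl(p, q)
assert torch.allclose(SymKlCriterion()(a, b), manual, atol=1e-5)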
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp26 = 
tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp37 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp38 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp40 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp51 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp52 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp54 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp60 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp39 = tmp37 + tmp38 tmp41 = tmp39 + tmp40 tmp43 = tmp41 + tmp42 tmp44 = tmp36 / tmp43 tmp45 = libdevice.isnan(tmp44).to(tl.int1) tmp46 = tmp44 == tmp10 tmp47 = tl_math.log(tmp44) tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp10, tmp48) tmp50 = tl.where(tmp45, tmp15, tmp49) tmp53 = tl_math.exp(tmp52) tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp63 = tl_math.log(tmp62) tmp64 = tmp51 - tmp63 tmp65 = tmp44 * tmp64 tmp66 = tmp50 - tmp65 tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = _tmp68 + tmp67 _tmp68 = tl.where(rmask, tmp69, _tmp68) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp68 = tl.sum(_tmp68, 1)[:, None] tmp70 = 0.25 tmp71 = tmp34 * tmp70 tmp72 = tmp68 * tmp70 tmp73 = tmp71 + tmp72 tmp74 = 1.0 tmp75 = tmp73 * tmp74 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp75, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_1[grid(256)](arg0_1, buf2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf8 = buf3 del buf3 triton_red_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2[grid (1)](buf8, buf0, buf2, buf4, buf6, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) 
del buf0 del buf2 del buf4 del buf6 return buf8, class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): """Alpha is used to weight each loss term """ super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class SymKlCriterionNew(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
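Because both KL directions are added, the loss value is symmetric in its two arguments; the detach calls only control which argument each term's gradient flows through. A quick CPU sketch of that property (our names):

import torch

a, b = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
crit = SymKlCriterion()
assert torch.allclose(crit(a, b), crit(b, a), atol=1e-6)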
chunhuililili/mt_dnn
SymKlCriterion
false
10,214
[ "MIT" ]
0
4c6efaf21724c7b8103a05e46b5b44d7b246225e
https://github.com/chunhuililili/mt_dnn/tree/4c6efaf21724c7b8103a05e46b5b44d7b246225e
Feedforward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/m6/cm6nixhuxyf76p5x5wtskqwfrb5kdl2bfda4t34yltftpilyxauy.py # Topologically Sorted Source Nodes: [inp], Original ATen: [aten.cat] # Source node to ATen node mapping: # inp => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x0 = xindex % 64 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1)), tmp4 & xmask, other=0.0) 
tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (64*((-4) + x1))), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ow/cowe67anxreez4toyyqf662mlqiithr7fgdvoihceoifbly2hc4v.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/aj/cajrg52z57wyh2izr245yxqora6gw2izo4h7lecftzupjn6tbagi.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # output_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (100, 4), (4, 1)) assert_size_stride(primals_4, (100, ), (1, )) assert_size_stride(primals_5, (1, 100), (100, 1)) assert_size_stride(primals_6, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [inp], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (8, 4, 4, 100), (1600, 400, 100, 1), 0); del buf1 # reuse buf5 = empty_strided_cuda((8, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf5, 12800, grid=grid(12800), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (128, 100), (100, 1), 0), reinterpret_tensor(primals_5, (100, 1), (1, 100), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (8, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.sigmoid] 
triton_poi_fused_sigmoid_2.run(buf4, primals_6, 128, grid=grid(128), stream=stream0) del primals_6 return (buf4, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(buf2, (128, 100), (100, 1), 0), buf4, primals_5, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
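The cat kernel above linearizes torch.vstack: for each output element it compares the leading index against 4 and reads from the first tensor when x1 < 4, otherwise from the second tensor at offset x1 - 4, with masked loads standing in for the branch. The same gather written eagerly (our names):

import torch

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = torch.empty(8, 4, 4, 4)
for i in range(8):
    out[i] = x[i] if i < 4 else y[i - 4]   # first half from x, second from y
assert torch.equal(out, torch.vstack([x, y]))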
import torch class Feedforward(torch.nn.Module): def __init__(self, input_size, hidden_size=100): super(Feedforward, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Linear(self.hidden_size, 1) self.sigmoid = torch.nn.Sigmoid() def forward(self, x, y): inp = torch.vstack([x, y]) hidden = self.fc1(inp) relu = self.relu(hidden) output = self.fc2(relu) output = self.sigmoid(output) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
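Since nn.Linear acts on the trailing dimension, the stacked (8, 4, 4, 4) input passes through fc1 to (8, 4, 4, 100) and through fc2 to (8, 4, 4, 1), with the sigmoid mapping every value into [0, 1]. A shape-and-range sketch for the eager module (input_size=4, as in get_init_inputs):

import torch

model = Feedforward(input_size=4)          # class from the record above
out = model(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert out.shape == (8, 4, 4, 1)
assert (out >= 0).all() and (out <= 1).all()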
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-4 + x1)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (100, 4), (4, 1)) assert_size_stride(primals_4, (100,), (1,)) assert_size_stride(primals_5, (1, 100), (100, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (8, 4, 4, 100), (1600, 400, 100, 1), 0) del buf1 buf5 = empty_strided_cuda((8, 4, 4, 100), (1664, 400, 100, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(12800)](buf2, primals_4, buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (128, 100), (100, 1), 0), reinterpret_tensor(primals_5, (100, 1), (1, 100), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (8, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_sigmoid_2[grid(128)](buf4, primals_6, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_6 return buf4, reinterpret_tensor(buf0, (128, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (128, 100), (100, 1), 0 ), buf4, primals_5, buf5 class FeedforwardNew(torch.nn.Module): def __init__(self, input_size, hidden_size=100): super(FeedforwardNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Linear(self.hidden_size, 1) self.sigmoid = torch.nn.Sigmoid() def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
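The fused relu kernel writes two buffers per element: the activation itself (in place, over the matmul output) and a boolean mask relu(x) <= 0 that aten.threshold_backward later uses to zero the incoming gradient. The pair, computed eagerly (our names):

import torch

pre = torch.randn(8, 4, 4, 100)   # matmul output plus bias, pre-activation
act = torch.relu(pre)             # what the kernel stores back in place
mask = act <= 0                   # what it saves for the backward pass
assert torch.equal(mask, pre <= 0)  # relu(x) <= 0 exactly when x <= 0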
jacob-parnell-rozetta/longformer_coverage
Feedforward
false
10,215
[ "Apache-2.0" ]
0
59268bc7ae7eeb962c43080e524eaf1e62100b6c
https://github.com/jacob-parnell-rozetta/longformer_coverage/tree/59268bc7ae7eeb962c43080e524eaf1e62100b6c
ToMono
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2c/c2caasuan6xkydnq2bvliamlyid6cl5fcz5kcz2mnyns45wtxqbs.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = 
tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ToMono(nn.Module): def forward(self, waveform: 'torch.Tensor') ->torch.Tensor: return torch.mean(waveform, dim=0, keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
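keepdim=True keeps the rank while collapsing the leading (channel) dimension, so a stereo waveform of shape (2, num_samples) comes out as (1, num_samples). A minimal sketch (ToMono is the class above; the shapes are ours):

import torch

stereo = torch.rand(2, 16000)     # (channels, samples)
mono = ToMono()(stereo)
assert mono.shape == (1, 16000)
assert torch.allclose(mono, stereo.mean(dim=0, keepdim=True))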
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class ToMonoNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
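The compiled kernel is specialized to the traced (4, 4, 4, 4) shape: it makes four loads 64 elements apart (one per leading index), adds them, and divides by 4.0. A hedged check, assuming a CUDA device (ToMonoNew is the compiled class above):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ToMonoNew()(x), x.mean(dim=0, keepdim=True))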
icyda17/very-deep-CNNs
ToMono
false
10,216
[ "Apache-2.0" ]
0
c275ef222d50dae90e508345ec3be5adfa5e33ce
https://github.com/icyda17/very-deep-CNNs/tree/c275ef222d50dae90e508345ec3be5adfa5e33ce
VAE_genes
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/g3/cg3el2gn3jo2uczn6kvxebxonhlsgf4gykdxpouwhsyjf55b5gdg.py # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] # Source node to ATen node mapping: # h1 => relu # Graph fragment: # %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/d7/cd7rilnjd42cirsc5dhnnwlficmjz5omrtsdfojgouhplcpynn4n.py # Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu] # Source node to ATen node mapping: # h2 => relu_1 # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_5), kwargs = {}) # %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ex/cexwuc6up4cv5uxb46cafc3eelvzsyfhdhsot7lgpz5xv7tenoc2.py # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # std => exp # z => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, 0.5), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %addmm_2), kwargs = {}) triton_poi_fused_add_exp_mul_2 = async_compile.triton('triton_poi_fused_add_exp_mul_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/vi/cvil5wapkv4i6x23roc23ppu6hrxwtoanlup7tzw5srvw23sealm.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3628 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 907 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (4, 907), (907, 1)) assert_size_stride(primals_2, (500, 907), (907, 1)) assert_size_stride(primals_3, (500, ), (1, )) assert_size_stride(primals_4, (100, 500), (500, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (5, 100), (100, 1)) assert_size_stride(primals_7, (5, ), (1, )) assert_size_stride(primals_8, (5, 100), (100, 1)) assert_size_stride(primals_9, (5, ), (1, )) assert_size_stride(primals_10, (100, 5), (5, 1)) assert_size_stride(primals_11, (100, ), (1, )) assert_size_stride(primals_12, (500, 100), (100, 1)) assert_size_stride(primals_13, (500, ), (1, )) assert_size_stride(primals_14, (907, 500), (500, 1)) assert_size_stride(primals_15, (907, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (907, 500), (1, 907), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 2000, grid=grid(2000), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 100), (1, 500), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 400, grid=grid(400), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf5) del primals_9 # Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like] buf6 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: 
[aten.mul, aten.exp, aten.add] triton_poi_fused_add_exp_mul_2.run(buf7, buf5, buf4, buf8, 20, grid=grid(20), stream=stream0) buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (5, 100), (1, 5), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf10, primals_11, 400, grid=grid(400), stream=stream0) del primals_11 buf11 = empty_strided_cuda((4, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf10, reinterpret_tensor(primals_12, (100, 500), (1, 100), 0), out=buf11) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [h4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf12, primals_13, 2000, grid=grid(2000), stream=stream0) del primals_13 buf13 = empty_strided_cuda((4, 907), (907, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_14, (500, 907), (1, 500), 0), out=buf13) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_3.run(buf14, primals_15, 3628, grid=grid(3628), stream=stream0) del primals_15 return (buf14, buf4, buf5, primals_1, buf1, buf3, buf5, buf7, buf8, buf10, buf12, buf14, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 907), (907, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((500, 907), (907, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((5, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((5, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((100, 5), (5, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((500, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((907, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((907, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn from torch.nn import functional as F class VAE_genes(nn.Module): def __init__(self): super(VAE_genes, self).__init__() self.input_linear = nn.Linear(907, 500) self.enc_middle = nn.Linear(500, 100) self.enc_1 = nn.Linear(100, 5) self.enc_2 = nn.Linear(100, 5) self.dec_0 = nn.Linear(5, 100) self.dec_middle = nn.Linear(100, 500) self.output_linear = nn.Linear(500, 907) def encode(self, x): h1 = F.relu(self.input_linear(x)) h2 = F.relu(self.enc_middle(h1)) return self.enc_1(h2), self.enc_2(h2) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def decode(self, z): h3 = F.relu(self.dec_0(z)) h4 = F.relu(self.dec_middle(h3)) return torch.sigmoid(self.output_linear(h4)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 907)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar def get_inputs(): return [torch.rand([4, 907])] def get_init_inputs(): return [[], {}]
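The reparameterize step in the module above is exactly what the fused kernel triton_poi_fused_add_exp_mul_2 in the generated code computes in a single pass. A minimal eager-mode sketch of that identity (shapes taken from this row; the variable names are illustrative):

import torch

# Reparameterization trick: z = eps * exp(0.5 * logvar) + mu, with eps ~ N(0, I).
# In the kernel: in_ptr0 = eps, in_ptr1 = logvar, in_ptr2 = mu, out_ptr0 = z.
mu = torch.zeros(4, 5)
logvar = torch.zeros(4, 5)
eps = torch.randn_like(mu)
z = eps * torch.exp(0.5 * logvar) + mu  # differentiable w.r.t. mu and logvar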
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 3628 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 907 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 907), (907, 1)) assert_size_stride(primals_2, (500, 907), (907, 1)) assert_size_stride(primals_3, (500,), (1,)) assert_size_stride(primals_4, (100, 500), (500, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (5, 100), (100, 1)) assert_size_stride(primals_7, (5,), (1,)) assert_size_stride(primals_8, (5, 100), (100, 1)) assert_size_stride(primals_9, (5,), (1,)) assert_size_stride(primals_10, (100, 5), (5, 1)) assert_size_stride(primals_11, (100,), (1,)) assert_size_stride(primals_12, (500, 100), (100, 1)) assert_size_stride(primals_13, (500,), (1,)) assert_size_stride(primals_14, (907, 500), (500, 1)) assert_size_stride(primals_15, (907,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (907, 500), (1, 907), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(2000)](buf1, primals_3, 2000, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 100), ( 1, 500), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 5), (5, 1), torch.float32) triton_poi_fused_add_exp_mul_2[grid(20)](buf7, buf5, buf4, buf8, 20, XBLOCK=32, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (5, 100), (1, 5), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_1[grid(400)](buf10, primals_11, 400, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 buf11 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_12, (100, 500), (1, 100), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_0[grid(2000)](buf12, primals_13, 2000, XBLOCK =128, num_warps=4, num_stages=1) del primals_13 buf13 = empty_strided_cuda((4, 907), (907, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_14, (500, 907), (1, 500), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_sigmoid_3[grid(3628)](buf14, primals_15, 3628, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 return (buf14, buf4, buf5, primals_1, buf1, buf3, buf5, 
buf7, buf8, buf10, buf12, buf14, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4) class VAE_genesNew(nn.Module): def __init__(self): super(VAE_genesNew, self).__init__() self.input_linear = nn.Linear(907, 500) self.enc_middle = nn.Linear(500, 100) self.enc_1 = nn.Linear(100, 5) self.enc_2 = nn.Linear(100, 5) self.dec_0 = nn.Linear(5, 100) self.dec_middle = nn.Linear(100, 500) self.output_linear = nn.Linear(500, 907) def encode(self, x): h1 = F.relu(self.input_linear(x)) h2 = F.relu(self.enc_middle(h1)) return self.enc_1(h2), self.enc_2(h2) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def decode(self, z): h3 = F.relu(self.dec_0(z)) h4 = F.relu(self.dec_middle(h3)) return torch.sigmoid(self.output_linear(h4)) def forward(self, input_0): primals_2 = self.input_linear.weight primals_3 = self.input_linear.bias primals_4 = self.enc_middle.weight primals_5 = self.enc_middle.bias primals_6 = self.enc_1.weight primals_7 = self.enc_1.bias primals_8 = self.enc_2.weight primals_9 = self.enc_2.bias primals_10 = self.dec_0.weight primals_11 = self.dec_0.bias primals_12 = self.dec_middle.weight primals_13 = self.dec_middle.bias primals_14 = self.output_linear.weight primals_15 = self.output_linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0], output[1], output[2]
helenaandres/adversarial-generation-of-gene-expression-data
VAE_genes
false
10217
[ "MIT" ]
0
9a10f0c364b7daa789ae75ab5b51ed5c7cbcbeb1
https://github.com/helenaandres/adversarial-generation-of-gene-expression-data/tree/9a10f0c364b7daa789ae75ab5b51ed5c7cbcbeb1
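Throughout this row, Inductor lowers each nn.Linear + ReLU pair into an extern mm followed by an in-place fused bias-add/ReLU kernel (triton_poi_fused_relu_0 and _1). A minimal sketch of the equivalent eager computation; W and b stand in for input_linear.weight and input_linear.bias and are illustrative:

import torch

x = torch.rand(4, 907)
W = torch.rand(500, 907)  # stands in for input_linear.weight
b = torch.rand(500)       # stands in for input_linear.bias
# extern_kernels.mm computes x @ W.t(); the Triton kernel then adds the
# bias and applies ReLU in place on the same buffer.
h1 = torch.relu(x @ W.t() + b)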
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/au/cauoqsg2cv2zjgirqxzbpuecwen54i554wu3e7y5ryzy3krptidd.py # Topologically Sorted Source Nodes: [mean, sub, std, truediv], Original ATen: [aten.mean, aten.sub, aten.std, aten.div] # Source node to ATen node mapping: # mean => mean # std => sqrt, var # sub => sub # truediv => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1,), kwargs = {correction: 1.0}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) triton_per_fused_div_mean_std_sub_0 = async_compile.triton('triton_per_fused_div_mean_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp5 = tl.broadcast_to(tmp1, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp3 / tmp16 tmp18 = tmp0 - tmp17 tmp19 = 255.0 tmp20 = tmp15 / tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp18 / tmp21 tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub, std, truediv], Original ATen: [aten.mean, aten.sub, aten.std, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_mean_std_sub_0.run(arg0_1, buf4, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Normalize(nn.Module): def forward(self, waveform: 'torch.Tensor') ->torch.Tensor: return (waveform - waveform.mean()) / waveform.std() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
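The single persistent-reduction kernel in this row (triton_per_fused_div_mean_std_sub_0) folds mean, unbiased std, subtraction, and division into one pass over all 256 elements; the 256.0 and 255.0 divisors are the sample count and Bessel's correction (torch.std defaults to correction=1). A minimal sketch of the same arithmetic:

import torch

x = torch.rand(4, 4, 4, 4)
n = x.numel()                            # 256
mean = x.sum() / n                       # the /256.0 in the kernel
var = ((x - mean) ** 2).sum() / (n - 1)  # the /255.0: Bessel's correction
out = (x - mean) / var.sqrt()            # == (x - x.mean()) / x.std()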
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp5 = tl.broadcast_to(tmp1, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp3 / tmp16 tmp18 = tmp0 - tmp17 tmp19 = 255.0 tmp20 = tmp15 / tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp18 / tmp21 tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp22, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mean_std_sub_0[grid(1)](arg0_1, buf4, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf4, class NormalizeNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
icyda17/very-deep-CNNs
Normalize
false
10218
[ "Apache-2.0" ]
0
c275ef222d50dae90e508345ec3be5adfa5e33ce
https://github.com/icyda17/very-deep-CNNs/tree/c275ef222d50dae90e508345ec3be5adfa5e33ce
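A quick way to sanity-check a row like this is to run the eager module and the generated wrapper on the same input. The sketch below assumes a CUDA device is available (call allocates CUDA buffers) and uses the NormalizeNew class from the triton_code field above:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = (x - x.mean()) / x.std()  # the original Normalize.forward
fused = NormalizeNew()(x)         # routes through the single Triton kernel
torch.testing.assert_close(eager, fused)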
Pad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/4m/c4m5w4y7qnqrt7o32xjvwkhlxdmbadwmibuly55v4t4jrg6szca5.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [0, 0], 4.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Pad(nn.Module): def __init__(self, value: 'float', size: 'int'): super().__init__() self.value = value self.size = size def forward(self, waveform: 'torch.Tensor') ->torch.Tensor: return F.pad(waveform, (0, self.size - max(waveform.shape)), 'constant', self.value) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'value': 4, 'size': 4}]
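With the init inputs in this row (value=4, size=4) and a (4, 4, 4, 4) input, size - max(waveform.shape) is 0, so F.pad is a no-op and the generated kernel degenerates into a plain element-wise copy; the graph fragment's constant_pad_nd(..., [0, 0], 4.0) confirms the zero-width pad. A minimal sketch of the degenerate case and a non-degenerate one (the size=6 variant is illustrative, not from this row):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
assert torch.equal(F.pad(x, (0, 0), 'constant', 4.0), x)  # zero-width pad: pure copy

y = torch.rand(2, 3)
padded = F.pad(y, (0, 6 - max(y.shape)), 'constant', 4.0)  # pads last dim 3 -> 6
assert padded.shape == (2, 6)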
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PadNew(nn.Module): def __init__(self, value: 'float', size: 'int'): super().__init__() self.value = value self.size = size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
icyda17/very-deep-CNNs
Pad
false
10219
[ "Apache-2.0" ]
0
c275ef222d50dae90e508345ec3be5adfa5e33ce
https://github.com/icyda17/very-deep-CNNs/tree/c275ef222d50dae90e508345ec3be5adfa5e33ce
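The SeeInDark entry below leans on one recurring fusion: convolution bias-add followed by a LeakyReLU written as maximum(0.2 * x, x) (the mul/maximum pairs in its kernels). A minimal sketch of the identity those kernels rely on:

import torch
import torch.nn.functional as F

x = torch.randn(1, 32, 64, 64)
# max(0.2 * x, x) equals x for x >= 0 and 0.2 * x for x < 0,
# i.e. LeakyReLU with negative_slope = 0.2.
torch.testing.assert_close(torch.maximum(0.2 * x, x), F.leaky_relu(x, 0.2))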
SeeInDark
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ef/ceftracrmerrbemjjdhh7djkqjczvpuee7nri7hosphedyfg4qun.py # Topologically Sorted Source Nodes: [conv2d, mul, outt], Original ATen: [aten.convolution, aten.mul, aten.maximum] # Source node to ATen node mapping: # conv2d => convolution # mul => mul # outt => maximum # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %maximum : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %convolution), kwargs = {}) triton_poi_fused_convolution_maximum_mul_0 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_maximum_mul_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp2) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py # Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/op/cop5rjn3v7lha5wdtulxqjcsubv55pvwiymxzcm62e6coonrliwx.py # Topologically Sorted Source Nodes: [conv2d_2, mul_2, outt_2], Original ATen: [aten.convolution, aten.mul, aten.maximum] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # mul_2 => mul_2 # outt_2 => maximum_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %maximum_2 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%mul_2, %convolution_2), kwargs = {}) triton_poi_fused_convolution_maximum_mul_2 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_maximum_mul_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp2) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gq/cgqvpghyzwkbypvbvlyig6ohuplw6qvhn73hkwj6auj5e2m5mqio.py # Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool2 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : 
[num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6l/c6lesexdwr5nm6d7lsdkgo4l566e3scq6zxgfqxi47wmtf4kto7r.py # Topologically Sorted Source Nodes: [conv2d_4, mul_4, outt_4], Original ATen: [aten.convolution, aten.mul, aten.maximum] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # mul_4 => mul_4 # outt_4 => maximum_4 # Graph fragment: # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_4 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %maximum_4 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%mul_4, %convolution_4), kwargs = {}) triton_poi_fused_convolution_maximum_mul_4 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_maximum_mul_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp2) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sa/csaftzx2wizpjlh3kfhvehexsbodntymkhlieviiunpbabb3qdpw.py # Topologically Sorted Source Nodes: [pool3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool3 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5g/c5gabmoosr2vahlotqfnxbwjnu33tmpcwuvsws5hasjyjdottuvk.py # Topologically Sorted Source Nodes: [conv2d_6, mul_6, outt_6], Original ATen: [aten.convolution, aten.mul, aten.maximum] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # mul_6 => mul_6 # outt_6 => maximum_6 # Graph fragment: # %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.2), kwargs = {}) # %maximum_6 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%mul_6, %convolution_6), kwargs = {}) triton_poi_fused_convolution_maximum_mul_6 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_maximum_mul_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp2) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/v4/cv4bgr5ihpscky5gololr6sh7rfrpysfa3a6blhdfz537nybxcot.py # Topologically Sorted Source Nodes: [pool4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool4 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zu/czurhuw2o4whoaspnnjlxyoahd4og2ffmx4mgjiohvfvsfoewn7r.py # Topologically Sorted Source Nodes: [conv2d_8, mul_8, outt_8], Original ATen: [aten.convolution, aten.mul, aten.maximum] # Source node to ATen node mapping: # conv2d_8 => convolution_8 # mul_8 => mul_8 # outt_8 => maximum_8 # Graph fragment: # %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.2), kwargs = {}) # %maximum_8 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%mul_8, %convolution_8), kwargs = {}) triton_poi_fused_convolution_maximum_mul_8 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_maximum_mul_8(in_out_ptr0, 
in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp2) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4q/c4q7b54jkvdrigbh2hhwfdixd3omegnlfydgs7vrdotzztryz2d4.py # Topologically Sorted Source Nodes: [up6_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up6_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_10, %maximum_7], 1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 512 x0 = xindex % 64 x2 = (xindex // 32768) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 512, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ij/cija3olw6cr62rftfqhakzgcme3krfny32x734h6puwoynmn36e3.py # Topologically Sorted Source Nodes: [up7_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up7_1 
=> cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_13, %maximum_5], 1), kwargs = {}) triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 256) % 256 x0 = xindex % 256 x2 = (xindex // 65536) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 256, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/n7/cn72godhzk2nf2qoevmkpkh7utghrsdupxxpx6bnjekioqeej2cz.py # Topologically Sorted Source Nodes: [up8_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up8_1 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %maximum_3], 1), kwargs = {}) triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 
'*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 1024) % 128 x0 = xindex % 1024 x2 = (xindex // 131072) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 128, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/el/cely3mvdjhv7xihjloi6dtillfxfr2cgcvve2nzawetqs4vi5xjq.py # Topologically Sorted Source Nodes: [up9_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up9_1 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_19, %maximum_1], 1), kwargs = {}) triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 64 x0 = xindex % 4096 x2 = (xindex // 262144) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 64, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wn/cwn3n6pc3p5yfpg2qoyin6heoasg375jez7odcopiodaao5bowrc.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.pixel_shuffle] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_pixel_shuffle_13 = async_compile.triton('triton_poi_fused_pixel_shuffle_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 2], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pixel_shuffle_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 98304 xnumel = 2 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y0 
= yindex % 64 y1 = (yindex // 64) % 2 y2 = (yindex // 128) % 64 y6 = (yindex // 8192) y3 = (yindex // 8192) % 3 y7 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*y2) + (4096*x5) + (8192*y1) + (16384*y6)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (2*y1) + (4*y3)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x5 + (2*y7)), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256, ), (1, )) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_23, (256, ), (1, )) assert_size_stride(primals_24, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (256, ), (1, )) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256, ), (1, )) assert_size_stride(primals_28, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_29, (128, ), (1, )) assert_size_stride(primals_30, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (128, ), (1, )) assert_size_stride(primals_32, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_33, (128, ), (1, )) assert_size_stride(primals_34, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_35, (64, ), (1, )) assert_size_stride(primals_36, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (64, ), (1, )) assert_size_stride(primals_38, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_39, (64, ), (1, )) assert_size_stride(primals_40, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_41, (32, ), (1, )) assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (32, ), (1, )) assert_size_stride(primals_44, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_45, (32, ), (1, 
)) assert_size_stride(primals_46, (12, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_47, (12, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, mul, outt], Original ATen: [aten.convolution, aten.mul, aten.maximum] stream0 = get_raw_stream(0) triton_poi_fused_convolution_maximum_mul_0.run(buf1, primals_2, buf2, 524288, grid=grid(524288), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, mul_1, outt_1], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_0.run(buf4, primals_5, buf5, 524288, grid=grid(524288), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf5, buf6, buf7, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf9 = buf8; del buf8 # reuse buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, mul_2, outt_2], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_2.run(buf9, primals_7, buf10, 262144, grid=grid(262144), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = buf11; del buf11 # reuse buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_3, mul_3, outt_3], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_2.run(buf12, primals_9, buf13, 262144, grid=grid(262144), stream=stream0) del primals_9 buf14 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf13, buf14, buf15, 
65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1)) buf17 = buf16; del buf16 # reuse buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_4, mul_4, outt_4], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_4.run(buf17, primals_11, buf18, 131072, grid=grid(131072), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = buf19; del buf19 # reuse buf21 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_5, mul_5, outt_5], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_4.run(buf20, primals_13, buf21, 131072, grid=grid(131072), stream=stream0) del primals_13 buf22 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) buf23 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.int8) # Topologically Sorted Source Nodes: [pool3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf21, buf22, buf23, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = buf24; del buf24 # reuse buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_6, mul_6, outt_6], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_6.run(buf25, primals_15, buf26, 65536, grid=grid(65536), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = buf27; del buf27 # reuse buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_7, mul_7, outt_7], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_6.run(buf28, primals_17, buf29, 65536, grid=grid(65536), stream=stream0) del primals_17 buf30 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32) buf31 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.int8) # Topologically Sorted Source Nodes: [pool4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf29, buf30, buf31, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf32 = 
extern_kernels.convolution(buf30, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 4, 4), (8192, 16, 4, 1)) buf33 = buf32; del buf32 # reuse buf34 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_8, mul_8, outt_8], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_8.run(buf33, primals_19, buf34, 32768, grid=grid(32768), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf34, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 4, 4), (8192, 16, 4, 1)) buf36 = buf35; del buf35 # reuse buf37 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_9, mul_9, outt_9], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_8.run(buf36, primals_21, buf37, 32768, grid=grid(32768), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [up6], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1)) buf39 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [up6_1], Original ATen: [aten.cat] triton_poi_fused_cat_9.run(buf38, primals_23, buf29, buf39, 131072, grid=grid(131072), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 256, 8, 8), (16384, 64, 8, 1)) buf41 = buf40; del buf40 # reuse buf42 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [conv2d_10, mul_10, outt_10], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_6.run(buf41, primals_25, buf42, 65536, grid=grid(65536), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf43 = extern_kernels.convolution(buf42, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 256, 8, 8), (16384, 64, 8, 1)) buf44 = buf43; del buf43 # reuse buf45 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_11, mul_11, outt_11], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_6.run(buf44, primals_27, buf45, 65536, grid=grid(65536), stream=stream0) del primals_27 # Topologically Sorted Source Nodes: [up7], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, primals_28, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1)) buf47 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [up7_1], 
Original ATen: [aten.cat] triton_poi_fused_cat_10.run(buf46, primals_29, buf21, buf47, 262144, grid=grid(262144), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1)) buf49 = buf48; del buf48 # reuse buf50 = buf46; del buf46 # reuse # Topologically Sorted Source Nodes: [conv2d_12, mul_12, outt_12], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_4.run(buf49, primals_31, buf50, 131072, grid=grid(131072), stream=stream0) del primals_31 # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf51 = extern_kernels.convolution(buf50, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 128, 16, 16), (32768, 256, 16, 1)) buf52 = buf51; del buf51 # reuse buf53 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_13, mul_13, outt_13], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_4.run(buf52, primals_33, buf53, 131072, grid=grid(131072), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [up8], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf53, primals_34, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf55 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [up8_1], Original ATen: [aten.cat] triton_poi_fused_cat_11.run(buf54, primals_35, buf13, buf55, 524288, grid=grid(524288), stream=stream0) del primals_35 # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf56 = extern_kernels.convolution(buf55, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf57 = buf56; del buf56 # reuse buf58 = buf54; del buf54 # reuse # Topologically Sorted Source Nodes: [conv2d_14, mul_14, outt_14], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_2.run(buf57, primals_37, buf58, 262144, grid=grid(262144), stream=stream0) del primals_37 # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf59 = extern_kernels.convolution(buf58, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf60 = buf59; del buf59 # reuse buf61 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_15, mul_15, outt_15], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_2.run(buf60, primals_39, buf61, 262144, grid=grid(262144), stream=stream0) del primals_39 # Topologically Sorted Source Nodes: [up9], Original ATen: [aten.convolution] buf62 = extern_kernels.convolution(buf61, primals_40, stride=(2, 2), padding=(0, 0), dilation=(1, 1), 
transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [up9_1], Original ATen: [aten.cat] triton_poi_fused_cat_12.run(buf62, primals_41, buf5, buf63, 1048576, grid=grid(1048576), stream=stream0) del primals_41 # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf63, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf65 = buf64; del buf64 # reuse buf66 = buf62; del buf62 # reuse # Topologically Sorted Source Nodes: [conv2d_16, mul_16, outt_16], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_0.run(buf65, primals_43, buf66, 524288, grid=grid(524288), stream=stream0) del primals_43 # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf67 = extern_kernels.convolution(buf66, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf68 = buf67; del buf67 # reuse buf69 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_17, mul_17, outt_17], Original ATen: [aten.convolution, aten.mul, aten.maximum] triton_poi_fused_convolution_maximum_mul_0.run(buf68, primals_45, buf69, 524288, grid=grid(524288), stream=stream0) del primals_45 # Topologically Sorted Source Nodes: [conv10], Original ATen: [aten.convolution] buf70 = extern_kernels.convolution(buf69, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf71 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256, 128, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.pixel_shuffle] triton_poi_fused_pixel_shuffle_13.run(buf70, primals_47, buf71, 98304, 2, grid=grid(98304, 2), stream=stream0) del buf70 del primals_47 return (reinterpret_tensor(buf71, (4, 3, 128, 128), (49152, 16384, 128, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf7, buf9, buf10, buf12, buf13, buf14, buf15, buf17, buf18, buf20, buf21, buf22, buf23, buf25, buf26, buf28, buf29, buf30, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53, buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68, buf69, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((256, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((64, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((12, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
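# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated output above): every
# triton_poi_fused_convolution_maximum_mul_* kernel fuses the convolution's
# per-channel bias add with the model's leaky-ReLU variant
# outt = max(0.2 * x, x), storing the biased activation in place
# (in_out_ptr0) and the rectified copy to a second buffer (out_ptr0).
# A minimal eager-mode equivalent of the same computation; the 4 -> 32
# channel shapes mirror the first encoder stage, everything else here is an
# illustrative choice, not part of the compiled module:
import torch
import torch.nn.functional as F

def fused_bias_lrelu_reference(y_no_bias, bias):
    # tmp2 = tmp0 + tmp1: broadcast the per-channel bias over N, H, W
    y = y_no_bias + bias.view(1, -1, 1, 1)
    # tmp5 = maximum(0.2 * tmp2, tmp2): the leaky-ReLU written as a max
    return y, torch.max(0.2 * y, y)

conv = torch.nn.Conv2d(4, 32, kernel_size=3, padding=1)
x = torch.rand(4, 4, 64, 64)
y = conv(x)  # bias already added by the module
y_no_bias = F.conv2d(x, conv.weight, bias=None, padding=1)
biased, rectified = fused_bias_lrelu_reference(y_no_bias, conv.bias)
assert torch.allclose(biased, y, atol=1e-5)
assert torch.allclose(rectified, torch.max(0.2 * y, y), atol=1e-5)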
import torch
import torch.nn as nn


class SeeInDark(nn.Module):
    # U-Net style encoder-decoder ("Learning to See in the Dark"): four
    # max-pool encoder stages, a transposed-conv decoder with skip
    # connections, and a final 1x1 conv whose 12 output channels are
    # pixel-shuffled into a 3-channel image at twice the input resolution.

    def __init__(self, num_classes=10):
        super(SeeInDark, self).__init__()
        # Encoder
        self.conv1_1 = nn.Conv2d(4, 32, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2_1 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2)
        self.conv4_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2)
        self.conv5_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        # Decoder
        self.upv6 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.conv6_1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1)
        self.conv6_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.upv7 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.conv7_1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.conv7_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.upv8 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.conv8_1 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1)
        self.conv8_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upv9 = nn.ConvTranspose2d(64, 32, 2, stride=2)
        self.conv9_1 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.conv9_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        # 12 channels = 3 colors x (2x2) sub-pixel positions for pixel_shuffle
        self.conv10_1 = nn.Conv2d(32, 12, kernel_size=1, stride=1)

    def forward(self, x):
        conv1 = self.lrelu(self.conv1_1(x))
        conv1 = self.lrelu(self.conv1_2(conv1))
        pool1 = self.pool1(conv1)
        conv2 = self.lrelu(self.conv2_1(pool1))
        conv2 = self.lrelu(self.conv2_2(conv2))
        # Note: the stateless pool1 module is reused for every stage below.
        pool2 = self.pool1(conv2)
        conv3 = self.lrelu(self.conv3_1(pool2))
        conv3 = self.lrelu(self.conv3_2(conv3))
        pool3 = self.pool1(conv3)
        conv4 = self.lrelu(self.conv4_1(pool3))
        conv4 = self.lrelu(self.conv4_2(conv4))
        pool4 = self.pool1(conv4)
        conv5 = self.lrelu(self.conv5_1(pool4))
        conv5 = self.lrelu(self.conv5_2(conv5))
        up6 = self.upv6(conv5)
        up6 = torch.cat([up6, conv4], 1)
        conv6 = self.lrelu(self.conv6_1(up6))
        conv6 = self.lrelu(self.conv6_2(conv6))
        up7 = self.upv7(conv6)
        up7 = torch.cat([up7, conv3], 1)
        conv7 = self.lrelu(self.conv7_1(up7))
        conv7 = self.lrelu(self.conv7_2(conv7))
        up8 = self.upv8(conv7)
        up8 = torch.cat([up8, conv2], 1)
        conv8 = self.lrelu(self.conv8_1(up8))
        conv8 = self.lrelu(self.conv8_2(conv8))
        up9 = self.upv9(conv8)
        up9 = torch.cat([up9, conv1], 1)
        conv9 = self.lrelu(self.conv9_1(up9))
        conv9 = self.lrelu(self.conv9_2(conv9))
        conv10 = self.conv10_1(conv9)
        out = nn.functional.pixel_shuffle(conv10, 2)
        return out

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.normal_(0.0, 0.02)
            if isinstance(m, nn.ConvTranspose2d):
                m.weight.data.normal_(0.0, 0.02)

    def lrelu(self, x):
        # Leaky ReLU written as an elementwise maximum; this is the
        # max(0.2*x, x) pattern that the fused Triton kernels implement.
        outt = torch.max(0.2 * x, x)
        return outt


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
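# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the captured source): conv10_1 emits 12
# channels precisely so that pixel_shuffle(., 2) can regroup them into 3
# color channels at twice the spatial resolution -- the same
# (4, 12, 64, 64) -> (4, 3, 64, 2, 64, 2) -> (4, 3, 128, 128) reshuffle that
# triton_poi_fused_pixel_shuffle_13 and the final reinterpret_tensor perform
# above. The shapes below are the ones used throughout this trace:
import torch
import torch.nn.functional as F

conv10 = torch.rand(4, 12, 64, 64)
out = F.pixel_shuffle(conv10, 2)
assert out.shape == (4, 3, 128, 128)
# out[n, c, 2*h + i, 2*w + j] == conv10[n, 4*c + 2*i + j, h, w]
assert torch.equal(out[0, 0, 1, 0], conv10[0, 2, 0, 0])
assert torch.equal(out[0, 2, 5, 7], conv10[0, 4 * 2 + 2 * 1 + 1, 2, 3])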
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_maximum_mul_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.2
    tmp4 = tmp2 * tmp3
    tmp5 = triton_helpers.maximum(tmp4, tmp2)
    tl.store(in_out_ptr0 + x3, tmp2, None)
    tl.store(out_ptr0 + x3, tmp5, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_maximum_mul_2(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.2
    tmp4 = tmp2 * tmp3
    tmp5 = triton_helpers.maximum(tmp4, tmp2)
    tl.store(in_out_ptr0 + x3, tmp2, None)
    tl.store(out_ptr0 + x3, tmp5, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_maximum_mul_4(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.2
    tmp4 = tmp2 * tmp3
    tmp5 = triton_helpers.maximum(tmp4, tmp2)
    tl.store(in_out_ptr0 + x3, tmp2, None)
    tl.store(out_ptr0 + x3, tmp5, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_maximum_mul_6(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.2
    tmp4 = tmp2 * tmp3
    tmp5 = triton_helpers.maximum(tmp4, tmp2)
    tl.store(in_out_ptr0 + x3, tmp2, None)
    tl.store(out_ptr0 + x3, tmp5, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_maximum_mul_8(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 512
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.2
    tmp4 = tmp2 * tmp3
    tmp5 = triton_helpers.maximum(tmp4, tmp2)
    tl.store(in_out_ptr0 + x3, tmp2, None)
    tl.store(out_ptr0 + x3, tmp5, None)


@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 64 % 512
    x0 = xindex % 64
    x2 = xindex // 32768
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 256, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 512, tl.int64)
    tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp10,
        other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 256 % 256
    x0 = xindex % 256
    x2 = xindex // 65536
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 256, tl.int64)
    tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp10,
        other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 1024 % 128
    x0 = xindex % 1024
    x2 = xindex // 131072
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 128, tl.int64)
    tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp10,
        other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4096 % 64
    x0 = xindex % 4096
    x2 = xindex // 262144
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
    tmp6
= tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_pixel_shuffle_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 2 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y0 = yindex % 64 y1 = yindex // 64 % 2 y2 = yindex // 128 % 64 y6 = yindex // 8192 y3 = yindex // 8192 % 3 y7 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * y2 + 4096 * x5 + 8192 * y1 + 16384 * y6), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 2 * y1 + 4 * y3), xmask, eviction_policy ='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x5 + 2 * y7), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (128, 128, 3, 3), (1152, 
9, 3, 1)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_35, (64,), (1,)) assert_size_stride(primals_36, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (64,), (1,)) assert_size_stride(primals_38, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_41, (32,), (1,)) assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (32,), (1,)) assert_size_stride(primals_44, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_45, (32,), (1,)) assert_size_stride(primals_46, (12, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_47, (12,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_maximum_mul_0[grid(524288)](buf1, primals_2, buf2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_0[grid(524288)](buf4, primals_5, buf5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf5, buf6, buf7, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_2[grid(262144)](buf9, primals_7, buf10, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_2[grid(262144)](buf12, primals_9, buf13, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(65536)](buf13, buf14, buf15, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 
16, 1)) buf17 = buf16 del buf16 buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_4[grid(131072)](buf17, primals_11, buf18, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_4[grid(131072)](buf20, primals_13, buf21, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf22 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. float32) buf23 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.int8 ) triton_poi_fused_max_pool2d_with_indices_5[grid(32768)](buf21, buf22, buf23, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = buf24 del buf24 buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_maximum_mul_6[grid(65536)](buf25, primals_15, buf26, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_15 buf27 = extern_kernels.convolution(buf26, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_maximum_mul_6[grid(65536)](buf28, primals_17, buf29, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_17 buf30 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) buf31 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.int8 ) triton_poi_fused_max_pool2d_with_indices_7[grid(16384)](buf29, buf30, buf31, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf32 = extern_kernels.convolution(buf30, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 4, 4), (8192, 16, 4, 1)) buf33 = buf32 del buf32 buf34 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. float32) triton_poi_fused_convolution_maximum_mul_8[grid(32768)](buf33, primals_19, buf34, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf35 = extern_kernels.convolution(buf34, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 4, 4), (8192, 16, 4, 1)) buf36 = buf35 del buf35 buf37 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. 
float32) triton_poi_fused_convolution_maximum_mul_8[grid(32768)](buf36, primals_21, buf37, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 buf38 = extern_kernels.convolution(buf37, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1)) buf39 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused_cat_9[grid(131072)](buf38, primals_23, buf29, buf39, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_23 buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 256, 8, 8), (16384, 64, 8, 1)) buf41 = buf40 del buf40 buf42 = buf38 del buf38 triton_poi_fused_convolution_maximum_mul_6[grid(65536)](buf41, primals_25, buf42, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_25 buf43 = extern_kernels.convolution(buf42, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 256, 8, 8), (16384, 64, 8, 1)) buf44 = buf43 del buf43 buf45 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_maximum_mul_6[grid(65536)](buf44, primals_27, buf45, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_27 buf46 = extern_kernels.convolution(buf45, primals_28, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1)) buf47 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) triton_poi_fused_cat_10[grid(262144)](buf46, primals_29, buf21, buf47, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_29 buf48 = extern_kernels.convolution(buf47, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1)) buf49 = buf48 del buf48 buf50 = buf46 del buf46 triton_poi_fused_convolution_maximum_mul_4[grid(131072)](buf49, primals_31, buf50, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf51 = extern_kernels.convolution(buf50, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 128, 16, 16), (32768, 256, 16, 1)) buf52 = buf51 del buf51 buf53 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_4[grid(131072)](buf52, primals_33, buf53, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_33 buf54 = extern_kernels.convolution(buf53, primals_34, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf55 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) triton_poi_fused_cat_11[grid(524288)](buf54, primals_35, buf13, buf55, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_35 buf56 = extern_kernels.convolution(buf55, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf57 = buf56 del buf56 buf58 = buf54 del buf54 
triton_poi_fused_convolution_maximum_mul_2[grid(262144)](buf57, primals_37, buf58, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_37 buf59 = extern_kernels.convolution(buf58, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf60 = buf59 del buf59 buf61 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_2[grid(262144)](buf60, primals_39, buf61, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_39 buf62 = extern_kernels.convolution(buf61, primals_40, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_12[grid(1048576)](buf62, primals_41, buf5, buf63, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_41 buf64 = extern_kernels.convolution(buf63, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf65 = buf64 del buf64 buf66 = buf62 del buf62 triton_poi_fused_convolution_maximum_mul_0[grid(524288)](buf65, primals_43, buf66, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_43 buf67 = extern_kernels.convolution(buf66, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf68 = buf67 del buf67 buf69 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_maximum_mul_0[grid(524288)](buf68, primals_45, buf69, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_45 buf70 = extern_kernels.convolution(buf69, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf71 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256, 128, 2, 1), torch.float32) triton_poi_fused_pixel_shuffle_13[grid(98304, 2)](buf70, primals_47, buf71, 98304, 2, XBLOCK=2, YBLOCK=512, num_warps=4, num_stages=1) del buf70 del primals_47 return (reinterpret_tensor(buf71, (4, 3, 128, 128), (49152, 16384, 128, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf7, buf9, buf10, buf12, buf13, buf14, buf15, buf17, buf18, buf20, buf21, buf22, buf23, buf25, buf26, buf28, buf29, buf30, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53, buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68, buf69) class SeeInDarkNew(nn.Module): def __init__(self, num_classes=10): super(SeeInDarkNew, self).__init__() self.conv1_1 = nn.Conv2d(4, 32, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2) self.conv2_1 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, 
stride=1, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2) self.conv3_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2) self.conv4_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2) self.conv5_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.upv6 = nn.ConvTranspose2d(512, 256, 2, stride=2) self.conv6_1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.upv7 = nn.ConvTranspose2d(256, 128, 2, stride=2) self.conv7_1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.upv8 = nn.ConvTranspose2d(128, 64, 2, stride=2) self.conv8_1 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.upv9 = nn.ConvTranspose2d(64, 32, 2, stride=2) self.conv9_1 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1) self.conv9_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.conv10_1 = nn.Conv2d(32, 12, kernel_size=1, stride=1) def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.normal_(0.0, 0.02) if isinstance(m, nn.ConvTranspose2d): m.weight.data.normal_(0.0, 0.02) def lrelu(self, x): outt = torch.max(0.2 * x, x) return outt def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv4_1.weight primals_15 = self.conv4_1.bias primals_16 = self.conv4_2.weight primals_17 = self.conv4_2.bias primals_18 = self.conv5_1.weight primals_19 = self.conv5_1.bias primals_20 = self.conv5_2.weight primals_21 = self.conv5_2.bias primals_22 = self.upv6.weight primals_23 = self.upv6.bias primals_24 = self.conv6_1.weight primals_25 = self.conv6_1.bias primals_26 = self.conv6_2.weight primals_27 = self.conv6_2.bias primals_28 = self.upv7.weight primals_29 = self.upv7.bias primals_30 = self.conv7_1.weight primals_31 = self.conv7_1.bias primals_32 = self.conv7_2.weight primals_33 = self.conv7_2.bias primals_34 = self.upv8.weight primals_35 = self.upv8.bias primals_36 = self.conv8_1.weight primals_37 = self.conv8_1.bias primals_38 = self.conv8_2.weight primals_39 = self.conv8_2.bias primals_40 = self.upv9.weight primals_41 = self.upv9.bias primals_42 = self.conv9_1.weight primals_43 = self.conv9_1.bias primals_44 = self.conv9_2.weight primals_45 = self.conv9_2.bias primals_46 = self.conv10_1.weight primals_47 = self.conv10_1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, 
primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47]) return output[0]
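Every triton_poi_fused_convolution_maximum_mul_* variant above shares one epilogue: add the convolution bias, then take max(0.2 * x, x). That is exactly the eager model's lrelu helper, and it coincides with a leaky ReLU of slope 0.2. A minimal sketch of that identity in plain PyTorch (no Triton needed):

import torch
import torch.nn.functional as F

# For x >= 0, max(0.2*x, x) = x; for x < 0 it equals 0.2*x, so the fused
# epilogue is a leaky ReLU with negative_slope=0.2, matching the eager
# model's lrelu helper torch.max(0.2 * x, x).
x = torch.randn(4, 32, 8, 8)
assert torch.allclose(torch.max(0.2 * x, x), F.leaky_relu(x, negative_slope=0.2))

The interleaved triton_poi_fused_max_pool2d_with_indices_* kernels similarly fuse the 2x2/stride-2 max with an int8 code in {0, 1, 2, 3} recording which window element won, which the pooling backward consumes.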
hyeokjae-choi/pytorch-Learning-to-See-in-the-Dark
SeeInDark
false
10,220
[ "MIT" ]
0
b32bf991072decb3aea348d8cd59acbf34d5da2c
https://github.com/hyeokjae-choi/pytorch-Learning-to-See-in-the-Dark/tree/b32bf991072decb3aea348d8cd59acbf34d5da2c
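The decoder tail of the record above feeds conv10_1 (12 output channels) into triton_poi_fused_pixel_shuffle_13, and call() returns the result reinterpreted as (4, 3, 128, 128). That is the standard depth-to-space rearrangement; a minimal sketch of the same shape math, with upscale factor 2 inferred from the 12 -> 3 channel ratio:

import torch
import torch.nn.functional as F

# pixel_shuffle maps (N, C*r*r, H, W) -> (N, C, H*r, W*r). With r=2,
# 12 channels at 64x64 become 3 channels at 128x128, matching the
# reinterpret_tensor view returned by call().
x = torch.randn(4, 12, 64, 64)
y = F.pixel_shuffle(x, upscale_factor=2)
assert y.shape == (4, 3, 128, 128)

The triton_poi_fused_cat_* kernels on the way up serve the U-Net skip connections: each fuses the transposed-conv bias add into the first half of a channel-wise torch.cat with the matching encoder features.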
HardtanhBoundToPOTNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/gk/cgkuacyg67ax6cn7dvso4vxfaggi56wy2cy6kvl4rdwv4oaicpmx.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] # Source node to ATen node mapping: # x => convolution # x_1 => clamp_max, clamp_min # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%convolution, 0.0), kwargs = {}) # %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {}) # %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%convolution, 0.0), kwargs = {}) # %ge_3 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%convolution, 6.0), kwargs = {}) # %bitwise_or_3 : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le_4, %ge_3), kwargs = {}) triton_poi_fused_convolution_hardtanh_hardtanh_backward_0 = async_compile.triton('triton_poi_fused_convolution_hardtanh_hardtanh_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardtanh_hardtanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_hardtanh_hardtanh_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7t/c7thipsuxejpdhfqhgxxoz3i72ctiepm6n5e45wy4oj7j27wj3sd.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%clamp_max, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2n/c2nyodo6u3mtlqksgttia3n2cnb4psn5qjzm3rjctdlkz2zjkyl5.py # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] # Source node to ATen node mapping: # x_3 => convolution_2 # x_4 => clamp_max_1, clamp_min_1 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%convolution_2, -2.0), kwargs = {}) # %clamp_max_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 6.0), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%convolution_2, -2.0), kwargs = {}) # %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%convolution_2, 6.0), kwargs = {}) # %bitwise_or_2 : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le_3, %ge_2), kwargs = {}) triton_poi_fused_convolution_hardtanh_hardtanh_backward_2 = async_compile.triton('triton_poi_fused_convolution_hardtanh_hardtanh_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardtanh_hardtanh_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_hardtanh_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -2.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/dt/cdtmu5coxf6a6z6a7qbu62uondg6byo3xrxzschwxd2u25cbfnvy.py # Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] # Source node to ATen node mapping: # x_10 => clamp_max_3, clamp_min_3 # x_9 => convolution_6 # Graph fragment: # %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_5, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%convolution_6, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 4.0), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%convolution_6, 0.0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%convolution_6, 4.0), kwargs = {}) # %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le_1, %ge), kwargs = {}) triton_poi_fused_convolution_hardtanh_hardtanh_backward_3 = async_compile.triton('triton_poi_fused_convolution_hardtanh_hardtanh_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardtanh_hardtanh_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_hardtanh_hardtanh_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 4.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/za/czawibcnybpf6z2ppwursqentvf7fhekh5skfgse6p4sezyfuulo.py # Topologically Sorted 
Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_12 => convolution_8 # x_13 => relu # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_7, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, None) tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args args.clear() assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (3, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_5, (3, ), (1, )) assert_size_stride(primals_6, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_7, (3, ), (1, )) assert_size_stride(primals_8, (3, 3, 1, 1), (3, 1, 
1, 1)) assert_size_stride(primals_9, (3, ), (1, )) assert_size_stride(primals_10, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_11, (3, ), (1, )) assert_size_stride(primals_12, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_13, (3, ), (1, )) assert_size_stride(primals_14, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_15, (3, ), (1, )) assert_size_stride(primals_16, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_17, (3, ), (1, )) assert_size_stride(primals_18, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_19, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) buf22 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_hardtanh_hardtanh_backward_0.run(buf0, primals_2, buf1, buf22, 49152, grid=grid(49152), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 49152, grid=grid(49152), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf5 = buf0; del buf0 # reuse buf21 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] triton_poi_fused_convolution_hardtanh_hardtanh_backward_2.run(buf4, primals_7, buf5, buf21, 49152, grid=grid(49152), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf7, primals_9, 49152, grid=grid(49152), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf9 = buf4; del buf4 # reuse buf20 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_6, 
x_7], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] triton_poi_fused_convolution_hardtanh_hardtanh_backward_0.run(buf8, primals_11, buf9, buf20, 49152, grid=grid(49152), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf11, primals_13, 49152, grid=grid(49152), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf13 = buf8; del buf8 # reuse buf19 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_9, x_10], Original ATen: [aten.convolution, aten.hardtanh, aten.hardtanh_backward] triton_poi_fused_convolution_hardtanh_hardtanh_backward_3.run(buf12, primals_15, buf13, buf19, 49152, grid=grid(49152), stream=stream0) del buf12 del primals_15 # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf15, primals_17, 49152, grid=grid(49152), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf17 = buf16; del buf16 # reuse buf18 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_4.run(buf17, primals_19, buf18, 49152, grid=grid(49152), stream=stream0) del primals_19 return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf18, buf19, buf20, buf21, buf22, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', 
dtype=torch.float32) primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
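The three hardtanh_hardtanh_backward kernels in this record differ only in their bounds, (0, 6), (-2, 6), and (0, 4); each clamps the biased convolution output and also stores the saturation mask that aten.hardtanh_backward uses to zero gradients. A minimal eager sketch of that forward/backward pair:

import torch
import torch.nn.functional as F

# Forward: clamp to [min_val, max_val]. Backward: the gradient is zeroed
# exactly where the input saturated, i.e. on the (x <= min) | (x >= max) mask.
x = torch.randn(4, 3, 8, 8, requires_grad=True)
y = F.hardtanh(x, min_val=-2.0, max_val=6.0)
y.sum().backward()
mask = (x <= -2.0) | (x >= 6.0)
assert torch.equal(y, x.detach().clamp(-2.0, 6.0))
assert torch.all(x.grad[mask] == 0)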
import torch
from torch.nn import Conv2d
from torch.nn import Hardtanh
from torch.nn.functional import relu
from torch.nn.functional import hardtanh
import torch.nn.functional


class HardtanhBoundToPOTNet(torch.nn.Module):

    def __init__(self):
        super(HardtanhBoundToPOTNet, self).__init__()
        self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.hardtanh1 = Hardtanh(min_val=0.0, max_val=6.0)
        self.conv2 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv3 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.hardtanh2 = Hardtanh(min_val=-2.0, max_val=6.0)
        self.conv4 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv5 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv6 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv7 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.hardtanh3 = Hardtanh(min_val=0.0, max_val=4.0)
        self.conv8 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv9 = Conv2d(3, 3, kernel_size=1, stride=1)

    def forward(self, inp):
        x = self.conv1(inp)
        x = self.hardtanh1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.hardtanh2(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = hardtanh(x, min_val=0.0, max_val=6.0)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.hardtanh3(x)
        x = self.conv8(x)
        x = self.conv9(x)
        x = relu(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
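Every convolution in this model is 1x1 with stride 1 and no padding, so the network is purely per-pixel channel mixing between the bounded activations, and all intermediate shapes stay at the (4, 3, 64, 64) of get_inputs. A quick sanity check of that shape preservation:

import torch
from torch.nn import Conv2d

# A 1x1, stride-1, padding-0 convolution leaves (H, W) untouched and maps
# 3 channels to 3 channels, so the whole stack is shape-preserving.
conv = Conv2d(3, 3, kernel_size=1, stride=1)
x = torch.rand(4, 3, 64, 64)
assert conv(x).shape == x.shape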
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Conv2d
from torch.nn import Hardtanh
import torch.nn.functional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# Fused bias-add + Hardtanh(0, 6); also emits the bool mask used by the
# hardtanh backward (True where the pre-activation fell outside [0, 6]).
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_0(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp9, None)


# Plain in-place bias add for the convolutions that have no activation.
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


# Fused bias-add + Hardtanh(-2, 6) with its backward mask.
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_2(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = -2.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp9, None)


# Fused bias-add + Hardtanh(0, 4) with its backward mask.
@triton.jit
def triton_poi_fused_convolution_hardtanh_hardtanh_backward_3(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 4.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp9, None)


# Fused in-place bias-add + ReLU; the bool output records where the
# activation clamped, i.e. the mask ReLU backward needs.
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr0 + x3, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
     primals_13, primals_14, primals_15, primals_16, primals_17,
     primals_18, primals_19) = args
    args.clear()
    assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_2, (3,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_5, (3,), (1,))
    assert_size_stride(primals_6, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_7, (3,), (1,))
    assert_size_stride(primals_8, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_9, (3,), (1,))
    assert_size_stride(primals_10, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_11, (3,), (1,))
    assert_size_stride(primals_12, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_13, (3,), (1,))
    assert_size_stride(primals_14, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_15, (3,), (1,))
    assert_size_stride(primals_16, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_17, (3,), (1,))
    assert_size_stride(primals_18, (3, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_19, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 (weights only; the bias is folded into the fused kernel below)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.float32)
        buf22 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_hardtanh_hardtanh_backward_0[grid(49152)](
            buf0, primals_2, buf1, buf22, 49152, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(49152)](buf3, primals_5, 49152,
            XBLOCK=512, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf5 = buf0  # reuse buf0's storage now that it is dead
        del buf0
        buf21 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_hardtanh_hardtanh_backward_2[grid(49152)](
            buf4, primals_7, buf5, buf21, 49152, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_7
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_1[grid(49152)](buf7, primals_9, 49152,
            XBLOCK=512, num_warps=4, num_stages=1)
        del primals_9
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf9 = buf4
        del buf4
        buf20 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_hardtanh_hardtanh_backward_0[grid(49152)](
            buf8, primals_11, buf9, buf20, 49152, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_11
        buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_1[grid(49152)](buf11, primals_13,
            49152, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_13
        buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf13 = buf8
        del buf8
        buf19 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_hardtanh_hardtanh_backward_3[grid(49152)](
            buf12, primals_15, buf13, buf19, 49152, XBLOCK=512, num_warps=4,
            num_stages=1)
        del buf12
        del primals_15
        buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_1[grid(49152)](buf15, primals_17,
            49152, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_17
        buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf17 = buf16
        del buf16
        buf18 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_4[grid(49152)](
            buf17, primals_19, buf18, 49152, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_19
    return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18, buf1,
        buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf18, buf19, buf20,
        buf21, buf22)


class HardtanhBoundToPOTNetNew(torch.nn.Module):

    def __init__(self):
        super(HardtanhBoundToPOTNetNew, self).__init__()
        self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.hardtanh1 = Hardtanh(min_val=0.0, max_val=6.0)
        self.conv2 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv3 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.hardtanh2 = Hardtanh(min_val=-2.0, max_val=6.0)
        self.conv4 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv5 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv6 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv7 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.hardtanh3 = Hardtanh(min_val=0.0, max_val=4.0)
        self.conv8 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv9 = Conv2d(3, 3, kernel_size=1, stride=1)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.conv5.weight
        primals_11 = self.conv5.bias
        primals_12 = self.conv6.weight
        primals_13 = self.conv6.bias
        primals_14 = self.conv7.weight
        primals_15 = self.conv7.bias
        primals_16 = self.conv8.weight
        primals_17 = self.conv8.bias
        primals_18 = self.conv9.weight
        primals_19 = self.conv9.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19])
        return output[0]
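
A minimal usage sketch, assuming a CUDA device. The (4, 3, 64, 64) input shape comes from the assert_size_stride guards in call(); the eager_forward below is an inference from the fused-kernel launch order (clamp[0,6], bias-only, clamp[-2,6], bias-only, clamp[0,6], bias-only, clamp[0,4], bias-only, ReLU) and is an assumption for illustration, not code recovered from the repository:

import torch
import torch.nn.functional as F

def eager_forward(m, x):
    # Hypothetical reconstruction of the original forward, inferred from
    # the kernel sequence in call(); hardtanh1 appears to be applied twice.
    x = m.hardtanh1(m.conv1(x))
    x = m.conv2(x)
    x = m.hardtanh2(m.conv3(x))
    x = m.conv4(x)
    x = m.hardtanh1(m.conv5(x))
    x = m.conv6(x)
    x = m.hardtanh3(m.conv7(x))
    x = m.conv8(x)
    return F.relu(m.conv9(x))

if torch.cuda.is_available():
    model = HardtanhBoundToPOTNetNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')  # shape pinned by the guards
    y = model(x)                                 # runs the compiled call()
    torch.testing.assert_close(y, eager_forward(model, x))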
isabella232/model_optimization
HardtanhBoundToPOTNet
false
10,221
[ "Apache-2.0" ]
0
074d1dfd8b4d18e57c6186c0ec5e49eb17a0fc7a
https://github.com/isabella232/model_optimization/tree/074d1dfd8b4d18e57c6186c0ec5e49eb17a0fc7a
Unet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2m/c2mwdf64imuljijtd53jyy22jmby3trx2gvbjen2dgwlh3cmrogd.py # Topologically Sorted Source Nodes: [x0], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x0 => convolution # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16777216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 65536) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jx/cjxc7n2c2qftkuoyjgqkrtipbmedojcryw46meeo2l4brbghgofz.py # Topologically Sorted Source Nodes: [conv2d_1, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x => relu # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 33032192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64516) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/x4/cx4atkmein2etossw3h3yntx5pgwur3fhs3v3me3zacbhadxzzwd.py # Topologically Sorted Source Nodes: [conv2d_2, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_1 => relu_1 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32514048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 63504) % 128 x0 = xindex % 63504 x4 = (xindex // 63504) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (63520*x4)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2y/c2ymfwfwccjwieeycyyuw4hakntpg7spghweswcwbgg3luk7lvgl.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': 
False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8128512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 126 x1 = (xindex // 126) % 126 x2 = (xindex // 15876) x3 = xindex % 15876 tmp0 = tl.load(in_ptr0 + ((2*x0) + (504*x1) + (63520*x2)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (504*x1) + (63520*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (252 + (2*x0) + (504*x1) + (63520*x2)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (253 + (2*x0) + (504*x1) + (63520*x2)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + (15904*x2)), tmp6, None) tl.store(out_ptr1 + (x3 + (16000*x2)), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2w/c2wyw57bn3lj5bprc2w5lfz4esfjux6eykgf6f7tpchh3wfn52c3.py # Topologically Sorted Source Nodes: [conv2d_3, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_2 => relu_2 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 15745024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 15376) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/g2/cg27b3cldqn2pktgcgwkn6bhtvvz2l4o6352tltcsw34o3b2v347.py # Topologically Sorted Source Nodes: [conv2d_4, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_3 => relu_3 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 15241216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 14884) % 256 x0 = xindex % 14884 x4 = (xindex // 14884) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 
= triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (14912*x4)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7k/c7kyvzhgks53stlmbqm3nvt66lvr3ua3nvqh42nzvr5ahgnl7ngk.py # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x2 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 3810304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 61 x1 = (xindex // 61) % 61 x2 = (xindex // 3721) x3 = xindex % 3721 tmp0 = tl.load(in_ptr0 + ((2*x0) + (244*x1) + (14912*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (244*x1) + (14912*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (122 + (2*x0) + (244*x1) + (14912*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (123 + (2*x0) + (244*x1) + (14912*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + (3744*x2)), tmp6, xmask) tl.store(out_ptr1 + (x3 + (3840*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/yl/cylprii3tymutljog6hgf2lhr336pibwctoxtfiafi2pgkx6grqx.py # Topologically Sorted Source Nodes: [conv2d_5, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_4 => relu_4 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7129088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 3481) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jg/cjg3tcflq5lpynuzcay73tqom5egacupheubg3mdhme3oypxjrmz.py # Topologically Sorted Source Nodes: [conv2d_6, x_5], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # x_5 => relu_5 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6653952 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 3249) % 512 x0 = xindex % 3249 x4 = (xindex // 3249) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3264*x4)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/74/c74eg2jsx6eq7gpmva5dbgb4nw2rq7cbz2aol7k7wqalccrixnbx.py # Topologically Sorted Source Nodes: [x3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x3 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1605632 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 28 x1 = (xindex // 28) % 28 x2 = (xindex // 784) x3 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (114*x1) + (3264*x2)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (114*x1) + (3264*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (57 + (2*x0) + (114*x1) + (3264*x2)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (58 + (2*x0) + (114*x1) + (3264*x2)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lw/clw4r3v6kgmknyimlru6icw7vq2zpydzxfleqrvnyuzcxyt2tbfb.py # Topologically Sorted Source Nodes: [conv2d_7, x_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # x_6 => relu_6 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2768896 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 676) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6n/c6nql2xqcjwzvj7fwnghziaku6pz5bkd6yc3g6m6j7i5usxkl42g.py # Topologically Sorted Source Nodes: [conv2d_8, x_7], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_8 => convolution_8 # x_7 => relu_7 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2359296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 576) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/fv/cfv7cvlie7bm4hhp53sgv2tcymuivbnlot2lr5s6326qobukz4fr.py # 
Topologically Sorted Source Nodes: [x4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x4 => getitem_7 # Graph fragment: # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 589824 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hw/chwn2yjtf6olmjz6ha33eiw5qpgdpvig432t3xx3en6owpgs2xal.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # x_8 => convert_element_type_1 # Graph fragment: # %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) triton_poi_fused__to_copy_13 = async_compile.triton('triton_poi_fused__to_copy_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_13(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/u3/cu3h3d26wolkgujjglu2owjyged3zb5julk4m77ep4cz7coezxh4.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # x_8 => add, clamp_max # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {}) # %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 11), kwargs = {}) triton_poi_fused_add_clamp_14 = async_compile.triton('triton_poi_fused_add_clamp_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_14(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 11, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xv/cxvib6tvr6x22kntxgj24mtmwft3yiexpre6v3x4jxda45x4a36c.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] # Source node to ATen node mapping: # x_8 => clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (24,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0.4782608695652174), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) triton_poi_fused__to_copy_arange_clamp_mul_sub_15 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__to_copy_arange_clamp_mul_sub_15(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yj/cyj3biurj7ixh2rddwm6e2qdlvq5hpx3na4lugr7c2524eqwlpvp.py # Topologically Sorted Source Nodes: [x4, x_8], Original ATen: [aten.max_pool2d_with_indices, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # x4 => _low_memory_max_pool2d_with_offsets_3 # x_8 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, mul_2, mul_3, mul_4, sub_1, sub_2, sub_4 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_7, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_6, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_6, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_6, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_6, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {}) triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16 = async_compile.triton('triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*i64', 5: '*i64', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK : tl.constexpr): xnumel = 2359296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 24) % 24 x0 = xindex % 24 x2 = (xindex // 576) x4 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 12, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + ((2*tmp8) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + (2*tmp8) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.load(in_ptr2 + (24 + (2*tmp8) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr2 + (25 + (2*tmp8) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + ((2*tmp8) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (1 + (2*tmp8) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp23 = tl.load(in_ptr2 + (24 + (2*tmp8) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp25 = tl.load(in_ptr2 + (25 + (2*tmp8) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = tmp27 + tmp1 tmp29 = tmp27 < 0 tmp30 = tl.where(tmp29, tmp28, tmp27) tmp31 = tl.load(in_ptr2 + ((2*tmp30) + (48*tmp19) + (576*x2)), None, 
eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (1 + (2*tmp30) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp33 = triton_helpers.maximum(tmp32, tmp31) tmp34 = tl.load(in_ptr2 + (24 + (2*tmp30) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp35 = triton_helpers.maximum(tmp34, tmp33) tmp36 = tl.load(in_ptr2 + (25 + (2*tmp30) + (48*tmp19) + (576*x2)), None, eviction_policy='evict_last') tmp37 = triton_helpers.maximum(tmp36, tmp35) tmp38 = tmp37 - tmp26 tmp40 = tmp38 * tmp39 tmp41 = tmp26 + tmp40 tmp42 = tl.load(in_ptr2 + ((2*tmp30) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr2 + (1 + (2*tmp30) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp44 = triton_helpers.maximum(tmp43, tmp42) tmp45 = tl.load(in_ptr2 + (24 + (2*tmp30) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp46 = triton_helpers.maximum(tmp45, tmp44) tmp47 = tl.load(in_ptr2 + (25 + (2*tmp30) + (48*tmp4) + (576*x2)), None, eviction_policy='evict_last') tmp48 = triton_helpers.maximum(tmp47, tmp46) tmp49 = tmp48 - tmp15 tmp50 = tmp49 * tmp39 tmp51 = tmp15 + tmp50 tmp52 = tmp51 - tmp41 tmp54 = tmp52 * tmp53 tmp55 = tmp41 + tmp54 tl.store(in_out_ptr1 + (x4), tmp55, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tf/ctfd4tfhre2htxndzu4bpjzosx5oj4yooma3u75m553it7aschoi.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_10 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %convolution_9], 1), kwargs = {}) triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2166784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 529) % 1024 x0 = xindex % 23 x1 = (xindex // 23) % 23 x3 = (xindex // 541696) x4 = xindex % 529 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) 
tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (87 + x0 + (28*x1) + (784*x2) + (401408*x3)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 1024, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x4 + (529*((-512) + x2)) + (270848*x3)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + ((-512) + x2), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x5), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/on/conr2sudxacy5il2ma633r4pnt6wtwr3q5ec63jng6ggljdfwv2n.py # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_11 => convolution_10 # x_12 => relu_8 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 903168 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 441) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3j/c3jb774iqqdz2p3sg7y5czdq627qsrdrnzzifbsu4y73zgpctzk3.py # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # x_14 => convert_element_type_5 # Graph fragment: # 
%convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {}) triton_poi_fused__to_copy_19 = async_compile.triton('triton_poi_fused__to_copy_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_19(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gg/cggtdprorym62f6ywtdets3rljai47k2t3u4alrnxdy5pxltjfv2.py # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # x_14 => add_5, clamp_max_4 # Graph fragment: # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {}) # %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_5, 18), kwargs = {}) triton_poi_fused_add_clamp_20 = async_compile.triton('triton_poi_fused_add_clamp_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 
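    # Hand-added note (not part of the generated metadata): add_clamp_20 emits the
    # clamped upper-neighbor row/col index for the 19 -> 38 bilinear upsample,
    # i.e. min(int(max(i * 18/37, 0.0)) + 1, 18) for i in [0, 38).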
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_20(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 18, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yj/cyjdtrthvyamlcdmzhzgl5mgtowpjzyhnjwy6r5hsgye6xv2hems.py # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] # Source node to ATen node mapping: # x_14 => clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_5, sub_5 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (38,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_4, 0.4864864864864865), kwargs = {}) # %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_5, 0.0), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {}) # %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {}) triton_poi_fused__to_copy_arange_clamp_mul_sub_21 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
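    # Hand-added note: this kernel computes the fractional bilinear weight
    # clamp(s - floor(s), 0.0, 1.0) with s = max(i * 18/37, 0.0), i.e. the w_x/w_y
    # lerp factors consumed by the fused interpolation kernel below.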
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_21(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/k6/ck6mwx4fz77ge46mallseejfelurqagf2qfovdyod65sta6jnr3j.py # Topologically Sorted Source Nodes: [x_13, x5, x_14], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # x5 => relu_9 # x_13 => convolution_11 # x_14 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_7, add_8, add_9, mul_7, mul_8, mul_9, sub_6, sub_7, sub_9 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_24, %primals_25, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_9 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_9, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_9, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {}) # %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_9, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {}) # %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_9, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_6), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_6), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_7), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_7), kwargs = {}) # %add_9 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_9), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 2957312 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 38) % 38 x0 = xindex % 38 x5 = (xindex // 1444) x2 = (xindex // 1444) % 512 x4 = xindex % 1444 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 19, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (19*tmp4) + (361*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + (19*tmp4) + (361*x5)), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + (19*tmp28) + (361*x5)), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 
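    # Reading aid (hand-added comment): tmp24 above already holds the top-row
    # horizontal lerp, relu(v00) + w_x * (relu(v01) - relu(v00)), where w_x is the
    # fractional weight in tmp22. The statements below ReLU the two bottom-row
    # neighbors (tmp31, tmp34), lerp them horizontally with the same w_x, and then
    # lerp vertically with w_y (tmp39): out = top + w_y * (bottom - top). Together
    # with the index/weight kernels above this realizes bilinear upsampling of
    # relu(conv(x)) from 19x19 to 38x38; the 18/37 = (19-1)/(38-1) scale suggests
    # align_corners=True semantics, roughly:
    #   F.interpolate(F.relu(conv(x)), size=(38, 38), mode='bilinear', align_corners=True)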
tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + (19*tmp28) + (361*x5)), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + (x6), tmp41, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nh/cnhgz7i3n54ujzvl5kid7v74xxqvukolcu37jdkvifnxnft3uqas.py # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_16 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_8, %convolution_12], 1), kwargs = {}) triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2803712 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 1369) % 512 x0 = xindex % 37 x1 = (xindex // 37) % 37 x3 = (xindex // 700928) x4 = xindex % 1369 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (744 + x0 + (61*x1) + (3744*x2) + (958464*x3)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 512, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x4 + (1369*((-256) + x2)) + (350464*x3)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + ((-256) + x2), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x5), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6q/c6qr4zr2k5ipho4u2xdornqef6axbetoglshkgtr5rbwpafyewqy.py # Topologically Sorted Source Nodes: [x_17, x_18], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_17 => 
convolution_13 # x_18 => relu_10 # Graph fragment: # %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {}) triton_poi_fused_convolution_relu_24 = async_compile.triton('triton_poi_fused_convolution_relu_24', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1254400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 1225) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/fb/cfb6qwktpwkzuucdy5mu24xl5oqwwxsqn2k4dcbrflxe7zksttse.py # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # x_20 => convert_element_type_9 # Graph fragment: # %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {}) triton_poi_fused__to_copy_25 = async_compile.triton('triton_poi_fused__to_copy_25', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
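    # Hand-added note: _to_copy_25 emits the floor source index for the
    # 33 -> 66 bilinear upsample, int(max(i * 32/65, 0.0)) for i in [0, 66).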
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_25(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5u/c5uialgvew2id65whlokoehgct3oy63hbbecjgrkzowuui3zs4v6.py # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # x_20 => add_10, clamp_max_8 # Graph fragment: # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {}) # %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_10, 32), kwargs = {}) triton_poi_fused_add_clamp_26 = async_compile.triton('triton_poi_fused_add_clamp_26', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_26(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = 
tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 32, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qc/cqc4sze4op62jq3aaiysuqvsvzutbngdxfuayebx2sntjfk26tv7.py # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] # Source node to ATen node mapping: # x_20 => clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_10, sub_10 # Graph fragment: # %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (66,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_8, 0.49230769230769234), kwargs = {}) # %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_10, 0.0), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {}) # %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0.0), kwargs = {}) # %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {}) triton_poi_fused__to_copy_arange_clamp_mul_sub_27 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_27', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_27(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = 
triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ay/cayke6phmc5ruf64l7c2fxvrssqa65zdqwvokzzsq56lm5tieb37.py # Topologically Sorted Source Nodes: [x_19, x6, x_20], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # x6 => relu_11 # x_19 => convolution_14 # x_20 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_12, add_13, add_14, mul_12, mul_13, mul_14, sub_11, sub_12, sub_14 # Graph fragment: # %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_30, %primals_31, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_11 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_14,), kwargs = {}) # %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_11, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {}) # %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_11, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {}) # %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_11, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {}) # %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_11, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_10), kwargs = {}) # %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_10), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {}) # %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_11), kwargs = {}) # %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_14), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 4460544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 66) % 66 x0 = xindex % 66 x5 = (xindex // 4356) x2 = (xindex // 4356) % 256 x4 = xindex % 4356 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 33, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (33*tmp4) + (1089*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + (33*tmp4) + (1089*x5)), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + (33*tmp28) + (1089*x5)), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + (33*tmp28) + (1089*x5)), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + (x6), tmp41, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bv/cbvofiwshtn2ilsjcfqbxjulbgi57jsffzalavgps4p5jxyrux5n.py # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_22 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_12, %convolution_15], 1), kwargs = {}) triton_poi_fused_cat_29 = 
async_compile.triton('triton_poi_fused_cat_29', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4326400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 4225) % 256 x0 = xindex % 65 x1 = (xindex // 65) % 65 x3 = (xindex // 1081600) x4 = xindex % 4225 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (3937 + x0 + (126*x1) + (15904*x2) + (2035712*x3)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 256, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x4 + (4225*((-128) + x2)) + (540800*x3)), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + ((-128) + x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/b3/cb3hjpgy6vwvm3yslwxbk7whvi5itjamklpvrk2rtuqdfldix3iu.py # Topologically Sorted Source Nodes: [x_23, x_24], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_23 => convolution_16 # x_24 => relu_12 # Graph fragment: # %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_34, %primals_35, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_16,), kwargs = {}) triton_poi_fused_convolution_relu_30 = async_compile.triton('triton_poi_fused_convolution_relu_30', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_30(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2032128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3969) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zz/czzfeqovk6o67sidpdqeitbmmqewekpjeccszeh5iyiwv2txqgiy.py # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # x_26 => convert_element_type_13 # Graph fragment: # %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {}) triton_poi_fused__to_copy_31 = async_compile.triton('triton_poi_fused__to_copy_31', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_31(out_ptr0, 
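    # hand-added note: emits floor source indices for the 61 -> 122 upsample,
    # int(max(i * 60/121, 0.0)) for i in [0, 122)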
xnumel, XBLOCK : tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gg/cggv2otvxeif2lhukmsds57uzturzpucyt6njwgarjcx7dljvmeb.py # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # x_26 => add_15, clamp_max_12 # Graph fragment: # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {}) # %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 60), kwargs = {}) triton_poi_fused_add_clamp_32 = async_compile.triton('triton_poi_fused_add_clamp_32', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_32(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 60, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4g/c4ghrkbjg7ns4pmhuqril4htj6shswlmwllposcgqw43xlrzmjc4.py # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] # Source node to ATen node mapping: # x_26 => clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_15, sub_15 # Graph fragment: # %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (122,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_12 : [num_users=1] = 
call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_12, 0.49586776859504134), kwargs = {}) # %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_15, 0.0), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {}) # %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0.0), kwargs = {}) # %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {}) triton_poi_fused__to_copy_arange_clamp_mul_sub_33 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_33', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_33(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ec/cecry7owhi2totr44b4dpmcxjl24xyb2kp7e62cm4qojaodksvfz.py # Topologically Sorted Source Nodes: [x_25, x7, x_26], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # x7 => relu_13 # x_25 => convolution_17 # x_26 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_17, add_18, add_19, mul_17, mul_18, mul_19, sub_16, sub_17, sub_19 # Graph fragment: # %convolution_17 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_12, %primals_36, %primals_37, [1, 1], [0, 0], 
[1, 1], False, [0, 0], 1), kwargs = {}) # %relu_13 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_17,), kwargs = {}) # %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_13, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {}) # %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_13, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {}) # %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_13, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {}) # %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_13, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {}) # %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max_14), kwargs = {}) # %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {}) # %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_14), kwargs = {}) # %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {}) # %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_18, %add_17), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %clamp_max_15), kwargs = {}) # %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_19), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 7620608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 122) % 122 x0 = xindex % 122 x5 = (xindex // 14884) x2 = (xindex // 14884) % 128 x4 = xindex % 14884 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 61, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (61*tmp4) + (3721*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + (61*tmp4) + (3721*x5)), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + (61*tmp28) + (3721*x5)), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + (61*tmp28) + (3721*x5)), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + (x6), tmp41, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/42/c422optynwcjab3762iylxhsctfppyldgwvyc7zcbj26fixwo6rs.py # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_28 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_16, %convolution_18], 1), kwargs = {}) triton_poi_fused_cat_35 = async_compile.triton('triton_poi_fused_cat_35', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 
3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7496192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 14641) % 128 x0 = xindex % 121 x1 = (xindex // 121) % 121 x3 = (xindex // 1874048) x4 = xindex % 14641 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (17476 + x0 + (256*x1) + (65536*x2) + (4194304*x3)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 128, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x4 + (14641*((-64) + x2)) + (937024*x3)), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + ((-64) + x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/p7/cp7wl5talmcgbtgcvqx7mdmxi3amg2b7urkr6rpkmstlue7whb33.py # Topologically Sorted Source Nodes: [x_29, x_30], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_29 => convolution_19 # x_30 => relu_14 # Graph fragment: # %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_40, %primals_41, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_19,), kwargs = {}) triton_poi_fused_convolution_relu_36 = async_compile.triton('triton_poi_fused_convolution_relu_36', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_36', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3625216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 14161) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/pp/cppyhkmzi7gre5zajamdclp4rjklyo6kmnmokugu4wutyydqtcto.py # Topologically Sorted Source Nodes: [x_31, x8], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x8 => relu_15 # x_31 => convolution_20 # Graph fragment: # %convolution_20 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_42, %primals_43, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_20,), kwargs = {}) triton_poi_fused_convolution_relu_37 = async_compile.triton('triton_poi_fused_convolution_relu_37', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_37', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3504384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 13689) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/e6/ce6sqlqj4je5yw66cw2fng6k3db7jmerlkwisdwopqx3yqx5o57p.py # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_21 => convolution_21 # Graph fragment: # %convolution_21 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_15, %primals_44, %primals_45, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_38 = async_compile.triton('triton_poi_fused_convolution_38', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_38', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 219024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 13689) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cn/ccnipirggi5fycaqhkfzu3v3ofw53wdpzlcu33td25pkablcsjbk.py # Topologically Sorted Source Nodes: [x_25, x7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x7 => relu_13 # x_25 => convolution_17 # Graph fragment: # %convolution_17 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_12, %primals_36, %primals_37, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_13 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_17,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_13, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_39 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_39', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_39', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_39(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1905152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3721) % 128 x0 = xindex % 3721 x4 = (xindex // 3721) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (3840*x4)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bj/cbjw6tqdnrpub4rhjcujvw4d2ebhzkwkcw7qw3t2fk2e7bjlyxm6.py # Topologically Sorted Source Nodes: [x_19, x6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x6 => relu_11 # x_19 => convolution_14 # Graph fragment: # %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_30, %primals_31, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_11 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_14,), kwargs = {}) # %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_11, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_40 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_40', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_convolution_relu_threshold_backward_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 1089) % 256 x0 = xindex % 1089 x4 = (xindex // 1089) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (1152*x4)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jm/cjmqugdti4xnjenshb2rdtv5pnp2wowzw724r36w6s36e3e4gbru.py # Topologically Sorted Source Nodes: [x_13, x5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x5 => relu_9 # x_13 => convolution_11 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_24, %primals_25, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_9 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) # %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_41 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_41', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
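# Added note (commentary, not generated output): like the two mask kernels
# above, this kernel re-applies the conv-bias add and ReLU for x5 only to
# derive the boolean (relu <= 0) mask that aten.threshold_backward consumes
# during the backward pass; it stores the mask, never the activation itself.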
@triton.jit def triton_poi_fused_convolution_relu_threshold_backward_41(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 739328 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 361) % 512 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45 = args args.clear() assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (512, ), (1, )) assert_size_stride(primals_14, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_15, (512, ), (1, )) assert_size_stride(primals_16, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (1024, ), (1, )) assert_size_stride(primals_18, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_19, (1024, ), (1, )) assert_size_stride(primals_20, (512, 1024, 2, 2), (4096, 4, 2, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (256, 512, 2, 2), (2048, 4, 2, 1)) assert_size_stride(primals_27, (256, ), (1, )) assert_size_stride(primals_28, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (256, ), (1, )) assert_size_stride(primals_30, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (256, ), (1, )) assert_size_stride(primals_32, (128, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_33, (128, ), (1, )) assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (128, ), (1, )) assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (128, ), (1, )) assert_size_stride(primals_38, (64, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_39, (64, ), (1, )) assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_41, (64, ), (1, )) 
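    # Added commentary: these stride/shape guards pin the argument layout to
    # the Unet reproduced later in this file set: primals_1/primals_2 are the
    # 1x1 upfeature conv (4 -> 64), primals_4..primals_19 the four
    # ContractingBlocks (64 -> 1024), primals_20..primals_43 the four
    # ExpandingBlocks (a 2x2 up-conv plus two 3x3 convs each),
    # primals_44/primals_45 the 1x1 downfeature conv (64 -> 4), and
    # primals_3 the (4, 4, 256, 256) input batch.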
assert_size_stride(primals_42, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (64, ), (1, )) assert_size_stride(primals_44, (4, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_45, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x0], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 256, 256), (4194304, 65536, 256, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x0], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16777216, grid=grid(16777216), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 254, 254), (8258048, 64516, 254, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 33032192, grid=grid(33032192), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 252, 252), (8128512, 63504, 252, 1)) buf5 = empty_strided_cuda((4, 128, 252, 252), (8130560, 63520, 252, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_7, buf5, 32514048, grid=grid(32514048), stream=stream0) del buf4 del primals_7 buf6 = empty_strided_cuda((4, 128, 126, 126), (2035712, 15904, 126, 1), torch.float32) buf7 = empty_strided_cuda((4, 128, 126, 126), (2048000, 16000, 126, 1), torch.int8) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 8128512, grid=grid(8128512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 256, 124, 124), (3936256, 15376, 124, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_9, 15745024, grid=grid(15745024), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 122, 122), (3810304, 14884, 122, 1)) buf11 = empty_strided_cuda((4, 256, 122, 122), (3817472, 14912, 122, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_4, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf10, primals_11, buf11, 15241216, 
grid=grid(15241216), stream=stream0) del buf10 del primals_11 buf12 = empty_strided_cuda((4, 256, 61, 61), (958464, 3744, 61, 1), torch.float32) buf13 = empty_strided_cuda((4, 256, 61, 61), (983040, 3840, 61, 1), torch.int8) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_6.run(buf11, buf12, buf13, 3810304, grid=grid(3810304), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 59, 59), (1782272, 3481, 59, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_5, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf15, primals_13, 7129088, grid=grid(7129088), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 512, 57, 57), (1663488, 3249, 57, 1)) buf17 = empty_strided_cuda((4, 512, 57, 57), (1671168, 3264, 57, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_6, x_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf16, primals_15, buf17, 6653952, grid=grid(6653952), stream=stream0) del buf16 del primals_15 buf18 = empty_strided_cuda((4, 512, 28, 28), (401408, 784, 28, 1), torch.float32) buf19 = empty_strided_cuda((4, 512, 28, 28), (401408, 784, 28, 1), torch.int8) # Topologically Sorted Source Nodes: [x3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_9.run(buf17, buf18, buf19, 1605632, grid=grid(1605632), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 1024, 26, 26), (692224, 676, 26, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_7, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf21, primals_17, 2768896, grid=grid(2768896), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 1024, 24, 24), (589824, 576, 24, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [conv2d_8, x_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf23, primals_19, 2359296, grid=grid(2359296), stream=stream0) del primals_19 buf24 = empty_strided_cuda((4, 1024, 12, 12), (147456, 144, 12, 1), torch.int8) # Topologically Sorted Source Nodes: [x4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_12.run(buf23, buf24, 589824, grid=grid(589824), stream=stream0) buf25 = empty_strided_cuda((24, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten._to_copy] 
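    # (added) buf25 above and the buf26..buf33 tables below are precomputed
    # lookup data for the bilinear nn.Upsample(align_corners=True) feeding
    # expanding1: integer source rows/cols (floor and floor+1, clamped to the
    # last valid index) plus the fractional lerp weights for the
    # 12x12 -> 24x24 resize, computed once and reused across all 1024 channels.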
triton_poi_fused__to_copy_13.run(buf25, 24, grid=grid(24), stream=stream0) buf26 = empty_strided_cuda((24, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_14.run(buf26, 24, grid=grid(24), stream=stream0) buf27 = empty_strided_cuda((24, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp] triton_poi_fused__to_copy_13.run(buf27, 24, grid=grid(24), stream=stream0) buf28 = empty_strided_cuda((24, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_14.run(buf28, 24, grid=grid(24), stream=stream0) buf31 = empty_strided_cuda((24, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] triton_poi_fused__to_copy_arange_clamp_mul_sub_15.run(buf31, 24, grid=grid(24), stream=stream0) buf33 = empty_strided_cuda((24, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_arange_clamp_mul_sub_15.run(buf33, 24, grid=grid(24), stream=stream0) buf30 = empty_strided_cuda((4, 1024, 24, 24), (589824, 576, 24, 1), torch.float32) buf34 = buf30; del buf30 # reuse buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [x4, x_8], Original ATen: [aten.max_pool2d_with_indices, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16.run(buf35, buf26, buf27, buf23, buf25, buf28, buf31, buf33, 2359296, grid=grid(2359296), stream=stream0) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 23, 23), (270848, 529, 23, 1)) buf37 = empty_strided_cuda((4, 1024, 23, 23), (541696, 529, 23, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.cat] triton_poi_fused_cat_17.run(buf18, buf36, primals_21, buf37, 2166784, grid=grid(2166784), stream=stream0) del buf36 del primals_21 # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 21, 21), (225792, 441, 21, 1)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_18.run(buf39, primals_23, 903168, grid=grid(903168), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 19, 19), (184832, 361, 19, 1)) buf41 = empty_strided_cuda((38, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_19.run(buf41, 38, grid=grid(38), stream=stream0) buf42 = empty_strided_cuda((38, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.add, aten.clamp] 
triton_poi_fused_add_clamp_20.run(buf42, 38, grid=grid(38), stream=stream0) buf43 = empty_strided_cuda((38, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp] triton_poi_fused__to_copy_19.run(buf43, 38, grid=grid(38), stream=stream0) buf44 = empty_strided_cuda((38, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_20.run(buf44, 38, grid=grid(38), stream=stream0) buf45 = empty_strided_cuda((38, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] triton_poi_fused__to_copy_arange_clamp_mul_sub_21.run(buf45, 38, grid=grid(38), stream=stream0) buf47 = empty_strided_cuda((38, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_arange_clamp_mul_sub_21.run(buf47, 38, grid=grid(38), stream=stream0) buf49 = empty_strided_cuda((4, 512, 38, 38), (739328, 1444, 38, 1), torch.float32) # Topologically Sorted Source Nodes: [x_13, x5, x_14], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22.run(buf41, buf43, buf40, primals_25, buf44, buf45, buf42, buf47, buf49, 2957312, grid=grid(2957312), stream=stream0) # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 256, 37, 37), (350464, 1369, 37, 1)) buf51 = empty_strided_cuda((4, 512, 37, 37), (700928, 1369, 37, 1), torch.float32) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.cat] triton_poi_fused_cat_23.run(buf12, buf50, primals_27, buf51, 2803712, grid=grid(2803712), stream=stream0) del buf50 del primals_27 # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution] buf52 = extern_kernels.convolution(buf51, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 256, 35, 35), (313600, 1225, 35, 1)) buf53 = buf52; del buf52 # reuse # Topologically Sorted Source Nodes: [x_17, x_18], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_24.run(buf53, primals_29, 1254400, grid=grid(1254400), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf53, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 33, 33), (278784, 1089, 33, 1)) buf55 = empty_strided_cuda((66, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_25.run(buf55, 66, grid=grid(66), stream=stream0) buf56 = empty_strided_cuda((66, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_26.run(buf56, 66, grid=grid(66), stream=stream0) buf57 = empty_strided_cuda((66, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp] triton_poi_fused__to_copy_25.run(buf57, 66, 
grid=grid(66), stream=stream0) buf58 = empty_strided_cuda((66, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_26.run(buf58, 66, grid=grid(66), stream=stream0) buf59 = empty_strided_cuda((66, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] triton_poi_fused__to_copy_arange_clamp_mul_sub_27.run(buf59, 66, grid=grid(66), stream=stream0) buf61 = empty_strided_cuda((66, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_arange_clamp_mul_sub_27.run(buf61, 66, grid=grid(66), stream=stream0) buf63 = empty_strided_cuda((4, 256, 66, 66), (1115136, 4356, 66, 1), torch.float32) # Topologically Sorted Source Nodes: [x_19, x6, x_20], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28.run(buf55, buf57, buf54, primals_31, buf58, buf59, buf56, buf61, buf63, 4460544, grid=grid(4460544), stream=stream0) # Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf63, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 128, 65, 65), (540800, 4225, 65, 1)) buf65 = empty_strided_cuda((4, 256, 65, 65), (1081600, 4225, 65, 1), torch.float32) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.cat] triton_poi_fused_cat_29.run(buf6, buf64, primals_33, buf65, 4326400, grid=grid(4326400), stream=stream0) del buf64 del primals_33 # Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution] buf66 = extern_kernels.convolution(buf65, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 128, 63, 63), (508032, 3969, 63, 1)) buf67 = buf66; del buf66 # reuse # Topologically Sorted Source Nodes: [x_23, x_24], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_30.run(buf67, primals_35, 2032128, grid=grid(2032128), stream=stream0) del primals_35 # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.convolution] buf68 = extern_kernels.convolution(buf67, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 128, 61, 61), (476288, 3721, 61, 1)) buf69 = empty_strided_cuda((122, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_31.run(buf69, 122, grid=grid(122), stream=stream0) buf70 = empty_strided_cuda((122, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_32.run(buf70, 122, grid=grid(122), stream=stream0) buf71 = empty_strided_cuda((122, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp] triton_poi_fused__to_copy_31.run(buf71, 122, grid=grid(122), stream=stream0) buf72 = empty_strided_cuda((122, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_32.run(buf72, 122, grid=grid(122), stream=stream0) buf73 = 
empty_strided_cuda((122, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] triton_poi_fused__to_copy_arange_clamp_mul_sub_33.run(buf73, 122, grid=grid(122), stream=stream0) buf75 = empty_strided_cuda((122, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_arange_clamp_mul_sub_33.run(buf75, 122, grid=grid(122), stream=stream0) buf77 = empty_strided_cuda((4, 128, 122, 122), (1905152, 14884, 122, 1), torch.float32) # Topologically Sorted Source Nodes: [x_25, x7, x_26], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34.run(buf69, buf71, buf68, primals_37, buf72, buf73, buf70, buf75, buf77, 7620608, grid=grid(7620608), stream=stream0) # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution] buf78 = extern_kernels.convolution(buf77, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf78, (4, 64, 121, 121), (937024, 14641, 121, 1)) buf79 = empty_strided_cuda((4, 128, 121, 121), (1874048, 14641, 121, 1), torch.float32) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.cat] triton_poi_fused_cat_35.run(buf1, buf78, primals_39, buf79, 7496192, grid=grid(7496192), stream=stream0) del buf78 del primals_39 # Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.convolution] buf80 = extern_kernels.convolution(buf79, primals_40, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf80, (4, 64, 119, 119), (906304, 14161, 119, 1)) buf81 = buf80; del buf80 # reuse # Topologically Sorted Source Nodes: [x_29, x_30], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_36.run(buf81, primals_41, 3625216, grid=grid(3625216), stream=stream0) del primals_41 # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution] buf82 = extern_kernels.convolution(buf81, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf82, (4, 64, 117, 117), (876096, 13689, 117, 1)) buf83 = buf82; del buf82 # reuse # Topologically Sorted Source Nodes: [x_31, x8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_37.run(buf83, primals_43, 3504384, grid=grid(3504384), stream=stream0) del primals_43 # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf84 = extern_kernels.convolution(buf83, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 4, 117, 117), (54756, 13689, 117, 1)) buf85 = buf84; del buf84 # reuse # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] triton_poi_fused_convolution_38.run(buf85, primals_45, 219024, grid=grid(219024), stream=stream0) del primals_45 buf86 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1), torch.bool) # Topologically Sorted Source Nodes: [x_25, x7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_39.run(buf68, primals_37, buf86, 1905152, grid=grid(1905152), stream=stream0) del buf68 del primals_37 
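    # Added commentary: buf86 above and buf87/buf88 below are boolean
    # (relu(x) <= 0) masks. Inductor saves these instead of the
    # pre-activation tensors so threshold_backward can zero gradients without
    # re-running the convolutions, which lets the conv outputs
    # buf68/buf54/buf40 be freed immediately via the del statements.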
buf87 = empty_strided_cuda((4, 256, 33, 33), (294912, 1152, 33, 1), torch.bool) # Topologically Sorted Source Nodes: [x_19, x6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_40.run(buf54, primals_31, buf87, 1115136, grid=grid(1115136), stream=stream0) del buf54 del primals_31 buf88 = empty_strided_cuda((4, 512, 19, 19), (184832, 361, 19, 1), torch.bool) # Topologically Sorted Source Nodes: [x_13, x5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_41.run(buf40, primals_25, buf88, 739328, grid=grid(739328), stream=stream0) del buf40 del primals_25 return (buf85, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, buf1, buf3, buf5, buf6, buf7, buf9, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf26, buf27, buf28, buf31, buf33, buf35, buf37, buf39, buf41, buf42, buf43, buf44, buf45, buf47, buf49, buf51, buf53, buf55, buf56, buf57, buf58, buf59, buf61, buf63, buf65, buf67, buf69, buf70, buf71, buf72, buf73, buf75, buf77, buf79, buf81, buf83, buf86, buf87, buf88, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 256, 256), (262144, 65536, 256, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 1024, 2, 2), (4096, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), 
(1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, 512, 2, 2), (2048, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((128, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((64, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((4, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
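# ---------------------------------------------------------------------------
# Added note: everything above is Inductor's ahead-of-time lowering of the
# Unet module reproduced below. call(...) consumes 45 primals -- 22 conv
# weights and 22 conv biases (upfeature, 4 contracting blocks with 2 convs
# each, 4 expanding blocks with 3 convs each, downfeature) plus the input --
# and returns the (4, 4, 117, 117) result in buf85 together with the tensors
# saved for the backward pass. A hedged way to drive it directly, assuming a
# CUDA device and tensors laid out exactly as asserted inside call():
#
#     out, *saved_for_backward = call([primals_1, primals_2, ..., primals_45])
#
# benchmark_compiled_module above builds exactly such an argument list from
# rand_strided tensors.
# ---------------------------------------------------------------------------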
import torch
import torch.nn as nn


def crop(image, new_shape):
    plus_h, plus_w = 0, 0
    if new_shape[2] % 2 != 0:
        plus_h = 1
    if new_shape[3] % 2 != 0:
        plus_w = 1
    middle_height = image.shape[2] // 2
    middle_weight = image.shape[3] // 2
    go_height = new_shape[2] // 2
    go_weight = new_shape[3] // 2
    cropped_image = image[:, :, middle_height - go_height:middle_height +
        go_height + plus_h, middle_weight - go_weight:middle_weight +
        go_weight + plus_w]
    return cropped_image


class ContractingBlock(nn.Module):

    def __init__(self, input_channel):
        super(ContractingBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=input_channel, out_channels=
            input_channel * 2, kernel_size=(3, 3))
        self.conv2 = nn.Conv2d(input_channel * 2, input_channel * 2,
            kernel_size=(3, 3))
        self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        return self.maxpool(x)


class ExpandingBlock(nn.Module):

    def __init__(self, input_channels):
        super(ExpandingBlock, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
            align_corners=True)
        self.conv1 = nn.Conv2d(input_channels, input_channels // 2,
            kernel_size=(2, 2))
        self.conv2 = nn.Conv2d(input_channels, input_channels // 2,
            kernel_size=(3, 3))
        self.conv3 = nn.Conv2d(input_channels // 2, input_channels // 2,
            kernel_size=(3, 3))
        self.activation = nn.ReLU()

    def forward(self, skip_con_x, x):
        x = self.upsample(x)
        x = self.conv1(x)
        skip_con_x = crop(skip_con_x, x.shape)
        x = torch.cat([skip_con_x, x], axis=1)
        x = self.conv2(x)
        x = self.activation(x)
        x = self.conv3(x)
        return self.activation(x)


class FeatureMapBlock(nn.Module):

    def __init__(self, input_channels, output_channels):
        super(FeatureMapBlock, self).__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=
            (1, 1))

    def forward(self, x):
        return self.conv(x)


class Unet(nn.Module):

    def __init__(self, input_channels, output_channels, hidden_channels=64):
        super(Unet, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        self.contract1 = ContractingBlock(hidden_channels)
        self.contract2 = ContractingBlock(hidden_channels * 2)
        self.contract3 = ContractingBlock(hidden_channels * 4)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.expanding1 = ExpandingBlock(hidden_channels * 16)
        self.expanding2 = ExpandingBlock(hidden_channels * 8)
        self.expanding3 = ExpandingBlock(hidden_channels * 4)
        self.expanding4 = ExpandingBlock(hidden_channels * 2)
        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)

    def forward(self, x):
        x0 = self.upfeature(x)
        x1 = self.contract1(x0)
        x2 = self.contract2(x1)
        x3 = self.contract3(x2)
        x4 = self.contract4(x3)
        x5 = self.expanding1(skip_con_x=x3, x=x4)
        x6 = self.expanding2(skip_con_x=x2, x=x5)
        x7 = self.expanding3(skip_con_x=x1, x=x6)
        x8 = self.expanding4(skip_con_x=x0, x=x7)
        return self.downfeature(x8)


def get_inputs():
    return [torch.rand([4, 4, 256, 256])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'output_channels': 4}]
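# --- Added illustrative sketch (not part of the original module code) ------
# A minimal eager-mode smoke test for the Unet above; the helper name
# _unet_smoke_test is ours. Valid (unpadded) 3x3 convolutions shrink the
# spatial dims at every stage, so the 256x256 input comes back at 117x117,
# matching the (4, 4, 117, 117) buffer the compiled call() returns.
def _unet_smoke_test():
    init_args, init_kwargs = get_init_inputs()
    model = Unet(*init_args, **init_kwargs)
    x, = get_inputs()
    out = model(x)
    assert out.shape == (4, 4, 117, 117)
    return out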
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64516 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 63504 % 128 x0 = xindex % 63504 x4 = xindex // 63504 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 63520 * x4), tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 126 x1 = xindex // 126 % 126 x2 = xindex // 15876 x3 = xindex % 15876 tmp0 = tl.load(in_ptr0 + (2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (252 + 2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (253 + 2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 15904 * x2), tmp6, None) tl.store(out_ptr1 + (x3 + 16000 * x2), tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 15376 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 14884 % 256 x0 = xindex % 14884 x4 = xindex // 14884 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 14912 * x4), tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 3810304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 61 x1 = xindex // 61 % 61 x2 = xindex // 3721 x3 = xindex % 3721 tmp0 = tl.load(in_ptr0 + (2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (122 + 2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (123 + 2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 3744 * x2), tmp6, xmask) tl.store(out_ptr1 + (x3 + 3840 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3481 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3249 % 512 x0 = xindex % 3249 x4 = xindex // 3249 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3264 * x4), tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 28 x1 = xindex // 28 % 28 x2 = xindex // 784 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 114 * x1 + 3264 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 114 * x1 + 3264 * x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (57 + 2 * x0 + 114 * x1 + 3264 * x2), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (58 + 2 * x0 + 114 * x1 + 3264 
* x2), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 676 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 576 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) @triton.jit def triton_poi_fused__to_copy_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 11, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) 
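# The _to_copy / add_clamp / arange_clamp_mul_sub kernels around this point
# precompute source indices and lerp weights for bilinear upsampling. A quick
# sanity sketch of where the scale constants come from, assuming they serve
# nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True);
# _upsample_scale is an illustrative helper, not part of the generated code:
def _upsample_scale(in_size: int, out_size: int) -> float:
    # align_corners=True maps output index i to input coordinate i * scale
    return (in_size - 1) / (out_size - 1)

assert abs(_upsample_scale(12, 24) - 0.4782608695652174) < 1e-12
assert abs(_upsample_scale(19, 38) - 0.4864864864864865) < 1e-12
assert abs(_upsample_scale(33, 66) - 0.49230769230769234) < 1e-12
assert abs(_upsample_scale(61, 122) - 0.49586776859504134) < 1e-12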
@triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 24 % 24 x0 = xindex % 24 x2 = xindex // 576 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 12, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.load(in_ptr2 + (24 + 2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr2 + (25 + 2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (1 + 2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp23 = tl.load(in_ptr2 + (24 + 2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp25 = tl.load(in_ptr2 + (25 + 2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = tmp27 + tmp1 tmp29 = tmp27 < 0 tmp30 = tl.where(tmp29, tmp28, tmp27) tmp31 = tl.load(in_ptr2 + (2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (1 + 2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp33 = triton_helpers.maximum(tmp32, tmp31) tmp34 = tl.load(in_ptr2 + (24 + 2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp35 = triton_helpers.maximum(tmp34, tmp33) tmp36 = tl.load(in_ptr2 + (25 + 2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp37 = triton_helpers.maximum(tmp36, tmp35) tmp38 = tmp37 - tmp26 tmp40 = tmp38 * tmp39 tmp41 = tmp26 + tmp40 tmp42 = tl.load(in_ptr2 + (2 * tmp30 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr2 + (1 + 2 * tmp30 + 48 * tmp4 + 576 * x2), None, 
eviction_policy='evict_last') tmp44 = triton_helpers.maximum(tmp43, tmp42) tmp45 = tl.load(in_ptr2 + (24 + 2 * tmp30 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp46 = triton_helpers.maximum(tmp45, tmp44) tmp47 = tl.load(in_ptr2 + (25 + 2 * tmp30 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp48 = triton_helpers.maximum(tmp47, tmp46) tmp49 = tmp48 - tmp15 tmp50 = tmp49 * tmp39 tmp51 = tmp15 + tmp50 tmp52 = tmp51 - tmp41 tmp54 = tmp52 * tmp53 tmp55 = tmp41 + tmp54 tl.store(in_out_ptr1 + x4, tmp55, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 529 % 1024 x0 = xindex % 23 x1 = xindex // 23 % 23 x3 = xindex // 541696 x4 = xindex % 529 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (87 + x0 + 28 * x1 + 784 * x2 + 401408 * x3), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 1024, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 529 * (-512 + x2) + 270848 * x3), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-512 + x2), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 441 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_19(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_20(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 18, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_21(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def 
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 38 % 38 x0 = xindex % 38 x5 = xindex // 1444 x2 = xindex // 1444 % 512 xindex % 1444 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 19, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 19 * tmp4 + 361 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 19 * tmp4 + 361 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 19 * tmp28 + 361 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 19 * tmp28 + 361 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + x6, tmp41, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 1369 % 512 x0 = xindex % 37 x1 = xindex // 37 % 37 x3 = xindex // 700928 x4 = xindex % 1369 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (744 + x0 + 61 * x1 + 3744 * x2 + 958464 * x3), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 1369 * (-256 + x2) + 350464 * x3), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-256 + x2), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_relu_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1254400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1225 % 256 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def 
triton_poi_fused__to_copy_25(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_26(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 32, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_27(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 66 % 66 x0 = xindex % 66 x5 = xindex // 4356 x2 = xindex // 4356 % 256 xindex % 4356 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 33, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 33 * tmp4 + 1089 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 33 * tmp4 + 1089 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 33 * tmp28 + 1089 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 33 * tmp28 + 1089 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 
tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + x6, tmp41, None) @triton.jit def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4326400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4225 % 256 x0 = xindex % 65 x1 = xindex // 65 % 65 x3 = xindex // 1081600 x4 = xindex % 4225 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (3937 + x0 + 126 * x1 + 15904 * x2 + 2035712 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 4225 * (-128 + x2) + 540800 * x3), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + (-128 + x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_relu_30(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2032128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3969 % 128 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused__to_copy_31(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_32(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 60, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_33(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 122 % 122 x0 = xindex % 122 x5 = xindex // 14884 x2 = xindex // 14884 % 128 xindex % 14884 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 61, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 61 * tmp4 + 3721 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 61 * tmp4 + 3721 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 61 * tmp28 + 3721 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 61 * tmp28 + 3721 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + x6, tmp41, None) @triton.jit def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7496192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 14641 % 128 x0 = xindex % 121 x1 = xindex // 121 % 121 x3 = xindex // 1874048 x4 = xindex % 14641 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (17476 + x0 + 256 * x1 + 65536 * x2 + 4194304 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 14641 * (-64 + x2) + 937024 * x3), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + (-64 + x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_relu_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3625216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 14161 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3504384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 13689 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) 
tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 219024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 13689 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_39(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1905152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3721 % 128 x0 = xindex % 3721 x4 = xindex // 3721 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 3840 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1089 % 256 x0 = xindex % 1089 x4 = xindex // 1089 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 1152 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_41(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 361 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45) = args args.clear() assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) 
assert_size_stride(primals_14, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (1024,), (1,)) assert_size_stride(primals_18, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_19, (1024,), (1,)) assert_size_stride(primals_20, (512, 1024, 2, 2), (4096, 4, 2, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (256, 512, 2, 2), (2048, 4, 2, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (128, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (64, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_41, (64,), (1,)) assert_size_stride(primals_42, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (64,), (1,)) assert_size_stride(primals_44, (4, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_45, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 256, 256), (4194304, 65536, 256, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16777216)](buf1, primals_2, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 254, 254), (8258048, 64516, 254, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(33032192)](buf3, primals_5, 33032192, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 252, 252), (8128512, 63504, 252, 1)) buf5 = empty_strided_cuda((4, 128, 252, 252), (8130560, 63520, 252, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(32514048)](buf4, primals_7, buf5, 32514048, XBLOCK=512, num_warps=8, num_stages=1) del buf4 del primals_7 buf6 = empty_strided_cuda((4, 128, 126, 126), (2035712, 15904, 126, 1), torch.float32) buf7 = empty_strided_cuda((4, 128, 126, 126), (2048000, 16000, 126, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(8128512)](buf5, buf6, buf7, 8128512, XBLOCK=1024, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf8, (4, 256, 124, 124), (3936256, 15376, 124, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(15745024)](buf9, primals_9, 15745024, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 122, 122), (3810304, 14884, 122, 1)) buf11 = empty_strided_cuda((4, 256, 122, 122), (3817472, 14912, 122, 1), torch.float32) triton_poi_fused_convolution_relu_5[grid(15241216)](buf10, primals_11, buf11, 15241216, XBLOCK=1024, num_warps=4, num_stages=1 ) del buf10 del primals_11 buf12 = empty_strided_cuda((4, 256, 61, 61), (958464, 3744, 61, 1), torch.float32) buf13 = empty_strided_cuda((4, 256, 61, 61), (983040, 3840, 61, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_6[grid(3810304)](buf11, buf12, buf13, 3810304, XBLOCK=512, num_warps=8, num_stages=1) buf14 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 59, 59), (1782272, 3481, 59, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_7[grid(7129088)](buf15, primals_13, 7129088, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 512, 57, 57), (1663488, 3249, 57, 1)) buf17 = empty_strided_cuda((4, 512, 57, 57), (1671168, 3264, 57, 1), torch.float32) triton_poi_fused_convolution_relu_8[grid(6653952)](buf16, primals_15, buf17, 6653952, XBLOCK=512, num_warps=8, num_stages=1) del buf16 del primals_15 buf18 = empty_strided_cuda((4, 512, 28, 28), (401408, 784, 28, 1), torch.float32) buf19 = empty_strided_cuda((4, 512, 28, 28), (401408, 784, 28, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_9[grid(1605632)](buf17, buf18, buf19, 1605632, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 1024, 26, 26), (692224, 676, 26, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_10[grid(2768896)](buf21, primals_17, 2768896, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 1024, 24, 24), (589824, 576, 24, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(2359296)](buf23, primals_19, 2359296, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf24 = empty_strided_cuda((4, 1024, 12, 12), (147456, 144, 12, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(589824)](buf23, buf24, 589824, XBLOCK=512, num_warps=8, num_stages=1) buf25 = empty_strided_cuda((24, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_13[grid(24)](buf25, 24, XBLOCK=32, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((24, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_14[grid(24)](buf26, 24, XBLOCK=32, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((24,), (1,), torch.int64) triton_poi_fused__to_copy_13[grid(24)](buf27, 24, XBLOCK=32, num_warps=1, num_stages=1) buf28 = 
empty_strided_cuda((24,), (1,), torch.int64) triton_poi_fused_add_clamp_14[grid(24)](buf28, 24, XBLOCK=32, num_warps=1, num_stages=1) buf31 = empty_strided_cuda((24,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_15[grid(24)](buf31, 24, XBLOCK=32, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((24, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_15[grid(24)](buf33, 24, XBLOCK=32, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((4, 1024, 24, 24), (589824, 576, 24, 1), torch.float32) buf34 = buf30 del buf30 buf35 = buf34 del buf34 triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16[ grid(2359296)](buf35, buf26, buf27, buf23, buf25, buf28, buf31, buf33, 2359296, XBLOCK=512, num_warps=8, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 23, 23), (270848, 529, 23, 1)) buf37 = empty_strided_cuda((4, 1024, 23, 23), (541696, 529, 23, 1), torch.float32) triton_poi_fused_cat_17[grid(2166784)](buf18, buf36, primals_21, buf37, 2166784, XBLOCK=1024, num_warps=4, num_stages=1) del buf36 del primals_21 buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 21, 21), (225792, 441, 21, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_18[grid(903168)](buf39, primals_23, 903168, XBLOCK=512, num_warps=8, num_stages=1) del primals_23 buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 19, 19), (184832, 361, 19, 1)) buf41 = empty_strided_cuda((38, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_19[grid(38)](buf41, 38, XBLOCK=64, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((38, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_20[grid(38)](buf42, 38, XBLOCK=64, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((38,), (1,), torch.int64) triton_poi_fused__to_copy_19[grid(38)](buf43, 38, XBLOCK=64, num_warps=1, num_stages=1) buf44 = empty_strided_cuda((38,), (1,), torch.int64) triton_poi_fused_add_clamp_20[grid(38)](buf44, 38, XBLOCK=64, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((38,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_21[grid(38)](buf45, 38, XBLOCK=64, num_warps=1, num_stages=1) buf47 = empty_strided_cuda((38, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_21[grid(38)](buf47, 38, XBLOCK=64, num_warps=1, num_stages=1) buf49 = empty_strided_cuda((4, 512, 38, 38), (739328, 1444, 38, 1), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22[grid (2957312)](buf41, buf43, buf40, primals_25, buf44, buf45, buf42, buf47, buf49, 2957312, XBLOCK=1024, num_warps=4, num_stages=1) buf50 = extern_kernels.convolution(buf49, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 256, 37, 37), (350464, 1369, 37, 1)) buf51 = empty_strided_cuda((4, 512, 37, 37), (700928, 1369, 37, 1), torch.float32) triton_poi_fused_cat_23[grid(2803712)](buf12, buf50, primals_27, buf51, 2803712, XBLOCK=1024, num_warps=4, num_stages=1) del buf50 del primals_27 buf52 = extern_kernels.convolution(buf51, 
primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 256, 35, 35), (313600, 1225, 35, 1)) buf53 = buf52 del buf52 triton_poi_fused_convolution_relu_24[grid(1254400)](buf53, primals_29, 1254400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_29 buf54 = extern_kernels.convolution(buf53, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 33, 33), (278784, 1089, 33, 1)) buf55 = empty_strided_cuda((66, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_25[grid(66)](buf55, 66, XBLOCK=128, num_warps=4, num_stages=1) buf56 = empty_strided_cuda((66, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_26[grid(66)](buf56, 66, XBLOCK=128, num_warps=4, num_stages=1) buf57 = empty_strided_cuda((66,), (1,), torch.int64) triton_poi_fused__to_copy_25[grid(66)](buf57, 66, XBLOCK=128, num_warps=4, num_stages=1) buf58 = empty_strided_cuda((66,), (1,), torch.int64) triton_poi_fused_add_clamp_26[grid(66)](buf58, 66, XBLOCK=128, num_warps=4, num_stages=1) buf59 = empty_strided_cuda((66,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_27[grid(66)](buf59, 66, XBLOCK=128, num_warps=4, num_stages=1) buf61 = empty_strided_cuda((66, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_27[grid(66)](buf61, 66, XBLOCK=128, num_warps=4, num_stages=1) buf63 = empty_strided_cuda((4, 256, 66, 66), (1115136, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28[grid (4460544)](buf55, buf57, buf54, primals_31, buf58, buf59, buf56, buf61, buf63, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) buf64 = extern_kernels.convolution(buf63, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 128, 65, 65), (540800, 4225, 65, 1)) buf65 = empty_strided_cuda((4, 256, 65, 65), (1081600, 4225, 65, 1), torch.float32) triton_poi_fused_cat_29[grid(4326400)](buf6, buf64, primals_33, buf65, 4326400, XBLOCK=512, num_warps=8, num_stages=1) del buf64 del primals_33 buf66 = extern_kernels.convolution(buf65, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 128, 63, 63), (508032, 3969, 63, 1)) buf67 = buf66 del buf66 triton_poi_fused_convolution_relu_30[grid(2032128)](buf67, primals_35, 2032128, XBLOCK=1024, num_warps=4, num_stages=1) del primals_35 buf68 = extern_kernels.convolution(buf67, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 128, 61, 61), (476288, 3721, 61, 1)) buf69 = empty_strided_cuda((122, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_31[grid(122)](buf69, 122, XBLOCK=128, num_warps=4, num_stages=1) buf70 = empty_strided_cuda((122, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_32[grid(122)](buf70, 122, XBLOCK=128, num_warps=4, num_stages=1) buf71 = empty_strided_cuda((122,), (1,), torch.int64) triton_poi_fused__to_copy_31[grid(122)](buf71, 122, XBLOCK=128, num_warps=4, num_stages=1) buf72 = empty_strided_cuda((122,), (1,), torch.int64) triton_poi_fused_add_clamp_32[grid(122)](buf72, 122, XBLOCK=128, num_warps=4, num_stages=1) buf73 = empty_strided_cuda((122,), (1,), torch.float32) 
triton_poi_fused__to_copy_arange_clamp_mul_sub_33[grid(122)](buf73, 122, XBLOCK=128, num_warps=4, num_stages=1) buf75 = empty_strided_cuda((122, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_33[grid(122)](buf75, 122, XBLOCK=128, num_warps=4, num_stages=1) buf77 = empty_strided_cuda((4, 128, 122, 122), (1905152, 14884, 122, 1), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34[grid (7620608)](buf69, buf71, buf68, primals_37, buf72, buf73, buf70, buf75, buf77, 7620608, XBLOCK=1024, num_warps=4, num_stages=1) buf78 = extern_kernels.convolution(buf77, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf78, (4, 64, 121, 121), (937024, 14641, 121, 1)) buf79 = empty_strided_cuda((4, 128, 121, 121), (1874048, 14641, 121, 1), torch.float32) triton_poi_fused_cat_35[grid(7496192)](buf1, buf78, primals_39, buf79, 7496192, XBLOCK=1024, num_warps=4, num_stages=1) del buf78 del primals_39 buf80 = extern_kernels.convolution(buf79, primals_40, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf80, (4, 64, 119, 119), (906304, 14161, 119, 1)) buf81 = buf80 del buf80 triton_poi_fused_convolution_relu_36[grid(3625216)](buf81, primals_41, 3625216, XBLOCK=1024, num_warps=4, num_stages=1) del primals_41 buf82 = extern_kernels.convolution(buf81, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf82, (4, 64, 117, 117), (876096, 13689, 117, 1)) buf83 = buf82 del buf82 triton_poi_fused_convolution_relu_37[grid(3504384)](buf83, primals_43, 3504384, XBLOCK=1024, num_warps=4, num_stages=1) del primals_43 buf84 = extern_kernels.convolution(buf83, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 4, 117, 117), (54756, 13689, 117, 1)) buf85 = buf84 del buf84 triton_poi_fused_convolution_38[grid(219024)](buf85, primals_45, 219024, XBLOCK=512, num_warps=8, num_stages=1) del primals_45 buf86 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_39[grid(1905152)]( buf68, primals_37, buf86, 1905152, XBLOCK=1024, num_warps=4, num_stages=1) del buf68 del primals_37 buf87 = empty_strided_cuda((4, 256, 33, 33), (294912, 1152, 33, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_40[grid(1115136)]( buf54, primals_31, buf87, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) del buf54 del primals_31 buf88 = empty_strided_cuda((4, 512, 19, 19), (184832, 361, 19, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_41[grid(739328)]( buf40, primals_25, buf88, 739328, XBLOCK=1024, num_warps=4, num_stages=1) del buf40 del primals_25 return (buf85, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, buf1, buf3, buf5, buf6, buf7, buf9, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf26, buf27, buf28, buf31, buf33, buf35, buf37, buf39, buf41, buf42, buf43, buf44, buf45, buf47, buf49, buf51, buf53, buf55, buf56, buf57, buf58, buf59, buf61, buf63, buf65, buf67, buf69, buf70, 
buf71, buf72, buf73, buf75, buf77, buf79, buf81, buf83, buf86, buf87, buf88) def crop(image, new_shape): plus_h, plus_w = 0, 0 if new_shape[2] % 2 != 0: plus_h = 1 if new_shape[3] % 2 != 0: plus_w = 1 middle_height = image.shape[2] // 2 middle_weight = image.shape[3] // 2 go_height = new_shape[2] // 2 go_weight = new_shape[3] // 2 cropped_image = image[:, :, middle_height - go_height:middle_height + go_height + plus_h, middle_weight - go_weight:middle_weight + go_weight + plus_w] return cropped_image class ContractingBlock(nn.Module): def __init__(self, input_channel): super(ContractingBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=input_channel, out_channels= input_channel * 2, kernel_size=(3, 3)) self.conv2 = nn.Conv2d(input_channel * 2, input_channel * 2, kernel_size=(3, 3)) self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) self.activation = nn.ReLU() def forward(self, x): x = self.activation(self.conv1(x)) x = self.activation(self.conv2(x)) return self.maxpool(x) class ExpandingBlock(nn.Module): def __init__(self, input_channels): super(ExpandingBlock, self).__init__() self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=(2, 2)) self.conv2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=(3, 3)) self.conv3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=(3, 3)) self.activation = nn.ReLU() def forward(self, skip_con_x, x): x = self.upsample(x) x = self.conv1(x) skip_con_x = crop(skip_con_x, x.shape) x = torch.cat([skip_con_x, x], axis=1) x = self.conv2(x) x = self.activation(x) x = self.conv3(x) return self.activation(x) class FeatureMapBlock(nn.Module): def __init__(self, input_channels, output_channels): super(FeatureMapBlock, self).__init__() self.conv = nn.Conv2d(input_channels, output_channels, kernel_size= (1, 1)) def forward(self, x): return self.conv(x) class UnetNew(nn.Module): def __init__(self, input_channels, output_channels, hidden_channels=64): super(UnetNew, self).__init__() self.upfeature = FeatureMapBlock(input_channels, hidden_channels) self.contract1 = ContractingBlock(hidden_channels) self.contract2 = ContractingBlock(hidden_channels * 2) self.contract3 = ContractingBlock(hidden_channels * 4) self.contract4 = ContractingBlock(hidden_channels * 8) self.expanding1 = ExpandingBlock(hidden_channels * 16) self.expanding2 = ExpandingBlock(hidden_channels * 8) self.expanding3 = ExpandingBlock(hidden_channels * 4) self.expanding4 = ExpandingBlock(hidden_channels * 2) self.downfeature = FeatureMapBlock(hidden_channels, output_channels) def forward(self, input_0): primals_1 = self.upfeature.conv.weight primals_2 = self.upfeature.conv.bias primals_4 = self.contract1.conv1.weight primals_5 = self.contract1.conv1.bias primals_6 = self.contract1.conv2.weight primals_7 = self.contract1.conv2.bias primals_8 = self.contract2.conv1.weight primals_9 = self.contract2.conv1.bias primals_10 = self.contract2.conv2.weight primals_11 = self.contract2.conv2.bias primals_12 = self.contract3.conv1.weight primals_13 = self.contract3.conv1.bias primals_14 = self.contract3.conv2.weight primals_15 = self.contract3.conv2.bias primals_16 = self.contract4.conv1.weight primals_17 = self.contract4.conv1.bias primals_18 = self.contract4.conv2.weight primals_19 = self.contract4.conv2.bias primals_20 = self.expanding1.conv1.weight primals_21 = self.expanding1.conv1.bias primals_22 = self.expanding1.conv2.weight primals_23 = self.expanding1.conv2.bias 
primals_24 = self.expanding1.conv3.weight primals_25 = self.expanding1.conv3.bias primals_26 = self.expanding2.conv1.weight primals_27 = self.expanding2.conv1.bias primals_28 = self.expanding2.conv2.weight primals_29 = self.expanding2.conv2.bias primals_30 = self.expanding2.conv3.weight primals_31 = self.expanding2.conv3.bias primals_32 = self.expanding3.conv1.weight primals_33 = self.expanding3.conv1.bias primals_34 = self.expanding3.conv2.weight primals_35 = self.expanding3.conv2.bias primals_36 = self.expanding3.conv3.weight primals_37 = self.expanding3.conv3.bias primals_38 = self.expanding4.conv1.weight primals_39 = self.expanding4.conv1.bias primals_40 = self.expanding4.conv2.weight primals_41 = self.expanding4.conv2.bias primals_42 = self.expanding4.conv3.weight primals_43 = self.expanding4.conv3.bias primals_44 = self.downfeature.conv.weight primals_45 = self.downfeature.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45]) return output[0]
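# A minimal usage sketch for the compiled wrapper above, assuming a CUDA
# device (call() pins device 0). Shapes mirror the assert_size_stride guards
# in call(): input (4, 4, 256, 256) -> output (4, 4, 117, 117).
if __name__ == "__main__":
    model = UnetNew(input_channels=4, output_channels=4).cuda()
    x = torch.rand(4, 4, 256, 256, device="cuda")
    out = model(x)
    print(out.shape)  # torch.Size([4, 4, 117, 117])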
furkannturkmen/pytorch-CNN-architecture
Unet
false
10222
[ "MIT" ]
0
6a864811f51409c1526224c288fe608010e0c888
https://github.com/furkannturkmen/pytorch-CNN-architecture/tree/6a864811f51409c1526224c288fe608010e0c888
Fusion
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/dw/cdwapf4spszv7xrakefo7xfxxiomqve2jhc3fgichvmsioyx2bqg.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] # Source node to ATen node mapping: # z => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %mul, %sub], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tmp21 = tl.full([1], 16, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tl.load(in_ptr0 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 return (buf2, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
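# --- Illustrative sketch (added; `eager_cat_reference` is a hypothetical name) ---
# What triton_poi_fused_cat_0 computes, in eager PyTorch: each 16-wide output
# row is the concatenation [x | y | x * y | x - y]. In the kernel, x0 =
# xindex % 16 is the column within that row, and the `tmp0 < 4/8/12/16`
# predicates pick which 4-wide segment (and hence which of the four source
# expressions) to load for that column.
import torch


def eager_cat_reference(x, y):
    # Mirrors the (4, 4, 16) buffer the fused kernel writes.
    return torch.cat([x, y, x * y, x - y], dim=2)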
import torch
import torch.nn as nn


class Fusion(nn.Module):

    def __init__(self, input_dim, hidden_dim):
        super(Fusion, self).__init__()
        self.linear = nn.Linear(input_dim * 4, hidden_dim, bias=True)
        self.tanh = nn.Tanh()

    def forward(self, x, y):
        z = torch.cat([x, y, x * y, x - y], dim=2)
        return self.tanh(self.linear(z))


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'hidden_dim': 4}]
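# --- Usage sketch (added; a minimal check, assuming CPU execution is fine) ---
# Wiring the sample helpers above into the eager module: two (4, 4, 4) inputs
# are fused into a (4, 4, 16) feature and projected back to hidden_dim = 4.
args, kwargs = get_init_inputs()
model = Fusion(*args, **kwargs)
out = model(*get_inputs())
assert out.shape == (4, 4, 4)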
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 * tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp14, tmp17, tmp18)
    tmp20 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp25 = tmp23 - tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp20, tmp25, tmp26)
    tmp28 = tl.where(tmp14, tmp19, tmp27)
    tmp29 = tl.where(tmp9, tmp10, tmp28)
    tmp30 = tl.where(tmp4, tmp5, tmp29)
    tl.store(out_ptr0 + x2, tmp30, xmask)


@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 16), (16, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_4
    return buf2, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf2


class FusionNew(nn.Module):

    def __init__(self, input_dim, hidden_dim):
        super(FusionNew, self).__init__()
        self.linear = nn.Linear(input_dim * 4, hidden_dim, bias=True)
        self.tanh = nn.Tanh()

    def forward(self, input_0, input_1):
        primals_3 = self.linear.weight
        primals_4 = self.linear.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
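# --- Parity-check sketch (added; hypothetical, needs a CUDA device and the
# eager Fusion class from the reference implementation above) ---
# With shared linear weights, the compiled FusionNew should agree with the
# eager module up to floating-point reassociation.
import torch

if torch.cuda.is_available():
    eager = Fusion(4, 4).cuda()
    fused = FusionNew(4, 4).cuda()
    fused.linear.load_state_dict(eager.linear.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(fused(x, y), eager(x, y))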
hgrhgy/NumSeq2SQL
Fusion
false
10223
[ "MIT" ]
0
6f22fdf108736f979afa2dbd3af14aa9ad4718aa
https://github.com/hgrhgy/NumSeq2SQL/tree/6f22fdf108736f979afa2dbd3af14aa9ad4718aa
CRF
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2b/c2bkqvv3w46os3y2jfp7zebz4v4e3jusxayajeubp3hevuxzgzn6.py # Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max] # Source node to ATen node mapping: # add_1 => add_1 # max_1 => max_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_2, %unsqueeze_1), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1), kwargs = {}) triton_poi_fused_add_max_0 = async_compile.triton('triton_poi_fused_add_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 
= xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3)) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = triton_helpers.maximum(tmp5, tmp11) tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp27 = tmp5 > tmp11 tmp28 = tmp5 == tmp11 tmp29 = tmp5 != tmp5 tmp30 = tmp11 != tmp11 tmp31 = tmp29 > tmp30 tmp32 = tmp27 | tmp31 tmp33 = tmp29 & tmp30 tmp34 = tmp28 | tmp33 tmp35 = tl.full([1], 0, tl.int64) tmp36 = tl.full([1], 1, tl.int64) tmp37 = tmp35 < tmp36 tmp38 = tmp34 & tmp37 tmp39 = tmp32 | tmp38 tmp40 = tl.where(tmp39, tmp5, tmp11) tmp41 = tl.where(tmp39, tmp35, tmp36) tmp42 = tmp40 > tmp18 tmp43 = tmp40 == tmp18 tmp44 = tmp40 != tmp40 tmp45 = tmp18 != tmp18 tmp46 = tmp44 > tmp45 tmp47 = tmp42 | tmp46 tmp48 = tmp44 & tmp45 tmp49 = tmp43 | tmp48 tmp50 = tl.full([1], 2, tl.int64) tmp51 = tmp41 < tmp50 tmp52 = tmp49 & tmp51 tmp53 = tmp47 | tmp52 tmp54 = tl.where(tmp53, tmp40, tmp18) tmp55 = tl.where(tmp53, tmp41, tmp50) tmp56 = tmp54 > tmp25 tmp57 = tmp54 == tmp25 tmp58 = tmp54 != tmp54 tmp59 = tmp25 != tmp25 tmp60 = tmp58 > tmp59 tmp61 = tmp56 | tmp60 tmp62 = tmp58 & tmp59 tmp63 = tmp57 | tmp62 tmp64 = tl.full([1], 3, tl.int64) tmp65 = tmp55 < tmp64 tmp66 = tmp63 & tmp65 tmp67 = tmp61 | tmp66 tmp68 = tl.where(tmp67, tmp54, tmp25) tmp69 = tl.where(tmp67, tmp55, tmp64) tl.store(out_ptr0 + (x2), tmp26, xmask) tl.store(out_ptr1 + (x2), tmp69, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/dy/cdy673rrkjiv3qikoqrlvr3x4djhltg5txuuelkcf3bbizc3xryu.py # Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max] # Source node to ATen node mapping: # add_3 => add_3 # max_2 => max_2 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_3, %unsqueeze_1), kwargs = {}) # %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_3, 1), kwargs = {}) triton_poi_fused_add_max_1 = async_compile.triton('triton_poi_fused_add_max_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + (16*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (5 + (16*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (6 + (16*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (7 + (16*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 > tmp9 tmp24 = tmp4 == tmp9 tmp25 = tmp4 != tmp4 tmp26 = tmp9 != tmp9 tmp27 = tmp25 > tmp26 tmp28 = tmp23 | tmp27 tmp29 = tmp25 & tmp26 tmp30 = tmp24 | tmp29 tmp31 = tl.full([1], 0, tl.int64) tmp32 = tl.full([1], 1, tl.int64) tmp33 = tmp31 < tmp32 tmp34 = tmp30 & tmp33 tmp35 = tmp28 | tmp34 tmp36 = tl.where(tmp35, tmp4, tmp9) tmp37 = tl.where(tmp35, tmp31, tmp32) tmp38 = tmp36 > tmp15 tmp39 = tmp36 == tmp15 tmp40 = tmp36 != tmp36 tmp41 = tmp15 != tmp15 tmp42 = tmp40 > tmp41 tmp43 = tmp38 | tmp42 tmp44 = tmp40 & tmp41 tmp45 = tmp39 | tmp44 tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp37 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp43 | tmp48 tmp50 = tl.where(tmp49, tmp36, tmp15) tmp51 = tl.where(tmp49, tmp37, tmp46) tmp52 = tmp50 > tmp21 tmp53 = tmp50 == tmp21 tmp54 = tmp50 != tmp50 tmp55 = tmp21 != tmp21 tmp56 = tmp54 > tmp55 tmp57 = tmp52 | tmp56 tmp58 = tmp54 & tmp55 tmp59 = tmp53 | tmp58 tmp60 = tl.full([1], 3, tl.int64) tmp61 = tmp51 < tmp60 tmp62 = tmp59 & tmp61 tmp63 = tmp57 | tmp62 tmp64 = tl.where(tmp63, tmp50, tmp21) tmp65 = tl.where(tmp63, tmp51, tmp60) tl.store(out_ptr0 + (x2), tmp22, xmask) tl.store(out_ptr1 + (x2), tmp65, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y6/cy6aidx4xjpi7w6qjmhugi7jyuzpadheqvm6f3ovuoln3kiyejvo.py # Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max] # Source node to ATen node mapping: # add_5 => add_5 # max_3 => max_3 # Graph fragment: # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_4, %unsqueeze_1), kwargs = {}) # %max_3 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 1), kwargs = {}) triton_poi_fused_add_max_2 = async_compile.triton('triton_poi_fused_add_max_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + (16*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (9 + (16*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (10 + (16*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (11 + (16*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 > tmp9 tmp24 = tmp4 == 
tmp9 tmp25 = tmp4 != tmp4 tmp26 = tmp9 != tmp9 tmp27 = tmp25 > tmp26 tmp28 = tmp23 | tmp27 tmp29 = tmp25 & tmp26 tmp30 = tmp24 | tmp29 tmp31 = tl.full([1], 0, tl.int64) tmp32 = tl.full([1], 1, tl.int64) tmp33 = tmp31 < tmp32 tmp34 = tmp30 & tmp33 tmp35 = tmp28 | tmp34 tmp36 = tl.where(tmp35, tmp4, tmp9) tmp37 = tl.where(tmp35, tmp31, tmp32) tmp38 = tmp36 > tmp15 tmp39 = tmp36 == tmp15 tmp40 = tmp36 != tmp36 tmp41 = tmp15 != tmp15 tmp42 = tmp40 > tmp41 tmp43 = tmp38 | tmp42 tmp44 = tmp40 & tmp41 tmp45 = tmp39 | tmp44 tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp37 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp43 | tmp48 tmp50 = tl.where(tmp49, tmp36, tmp15) tmp51 = tl.where(tmp49, tmp37, tmp46) tmp52 = tmp50 > tmp21 tmp53 = tmp50 == tmp21 tmp54 = tmp50 != tmp50 tmp55 = tmp21 != tmp21 tmp56 = tmp54 > tmp55 tmp57 = tmp52 | tmp56 tmp58 = tmp54 & tmp55 tmp59 = tmp53 | tmp58 tmp60 = tl.full([1], 3, tl.int64) tmp61 = tmp51 < tmp60 tmp62 = tmp59 & tmp61 tmp63 = tmp57 | tmp62 tmp64 = tl.where(tmp63, tmp50, tmp21) tmp65 = tl.where(tmp63, tmp51, tmp60) tl.store(out_ptr0 + (x2), tmp22, xmask) tl.store(out_ptr1 + (x2), tmp65, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/pz/cpzuxuwhwbn22vaq3ndvkouqpztabhutjiktjp3d5vxwx3ptniiy.py # Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather] # Source node to ATen node mapping: # add_7 => add_7 # max_4 => max_4 # tag_1 => gather # tag_2 => gather_1 # tag_3 => gather_2 # v_6 => add_6 # Graph fragment: # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %select_3), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %unsqueeze_5), kwargs = {}) # %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_7, 1, True), kwargs = {}) # %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_5, 1, %getitem_7), kwargs = {}) # %gather_1 : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_3, 1, %gather), kwargs = {}) # %gather_2 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_1, 1, %gather_1), kwargs = {}) triton_poi_fused_add_gather_max_3 = async_compile.triton('triton_poi_fused_add_gather_max_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i64', 4: '*i64', 5: '*i64', 6: '*i64', 7: '*i64', 8: '*i64', 9: '*i64', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gather_max_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (1)) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (2)) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp47 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (3)) tmp51 = tl.broadcast_to(tmp50, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 + tmp4 tmp8 = tmp6 + tmp7 tmp11 = tmp8 + tmp10 tmp12 = tmp5 > tmp11 tmp13 = tmp5 == tmp11 tmp14 = tmp5 != tmp5 tmp15 = tmp11 != tmp11 tmp16 = tmp14 > tmp15 tmp17 = tmp12 | tmp16 tmp18 = tmp14 & tmp15 tmp19 = tmp13 | tmp18 tmp20 = tl.full([1], 0, tl.int64) tmp21 = tl.full([1], 1, tl.int64) tmp22 = tmp20 < tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 | tmp23 tmp25 = tl.where(tmp24, tmp5, tmp11) tmp26 = tl.where(tmp24, tmp20, tmp21) tmp29 = tmp27 + tmp28 tmp32 = tmp29 + tmp31 tmp33 = tmp25 > tmp32 tmp34 = tmp25 == tmp32 tmp35 = tmp25 != tmp25 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 2, tl.int64) tmp42 = tmp26 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp25, tmp32) tmp46 = tl.where(tmp44, tmp26, tmp41) tmp49 = tmp47 + tmp48 tmp52 = tmp49 + tmp51 tmp53 = tmp45 > tmp52 tmp54 = tmp45 == tmp52 tmp55 = tmp45 != tmp45 tmp56 = tmp52 != tmp52 tmp57 = tmp55 > tmp56 tmp58 = tmp53 | tmp57 tmp59 = tmp55 & tmp56 tmp60 = tmp54 | tmp59 tmp61 = tl.full([1], 3, tl.int64) tmp62 = tmp46 < tmp61 tmp63 = tmp60 & tmp62 tmp64 = tmp58 | tmp63 tmp65 = tl.where(tmp64, tmp45, tmp52) tmp66 = tl.where(tmp64, tmp46, tmp61) tmp67 = tl.full([XBLOCK], 4, tl.int32) tmp68 = tmp66 + tmp67 tmp69 = tmp66 < 0 tmp70 = tl.where(tmp69, tmp68, tmp66) tl.device_assert(((0 <= tmp70) & (tmp70 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp70 < 4") tmp72 = tl.load(in_ptr3 + (tmp70 + (4*x0)), xmask, eviction_policy='evict_last') tmp73 = tmp72 + tmp67 tmp74 = tmp72 < 0 tmp75 = tl.where(tmp74, tmp73, tmp72) tl.device_assert(((0 <= tmp75) & (tmp75 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp75 < 4") tmp77 = tl.load(in_ptr4 + (tmp75 + (4*x0)), xmask, eviction_policy='evict_last') tmp78 = tmp77 + tmp67 tmp79 = tmp77 < 0 tmp80 = tl.where(tmp79, tmp78, tmp77) tl.device_assert(((0 <= tmp80) & (tmp80 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp80 < 4") tmp82 = tl.load(in_ptr5 + 
(tmp80 + (4*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (4*x0), tmp66, xmask) tl.store(out_ptr1 + (4*x0), tmp82, xmask) tl.store(out_ptr2 + (4*x0), tmp77, xmask) tl.store(out_ptr3 + (4*x0), tmp72, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, ), (1, )) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64) # Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max] stream0 = get_raw_stream(0) triton_poi_fused_add_max_0.run(arg0_1, arg1_1, arg2_1, buf0, buf1, 16, grid=grid(16), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64) # Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max] triton_poi_fused_add_max_1.run(buf0, arg0_1, arg2_1, buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = buf0; del buf0 # reuse buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64) # Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max] triton_poi_fused_add_max_2.run(buf2, arg0_1, arg2_1, buf4, buf5, 16, grid=grid(16), stream=stream0) del arg2_1 del buf2 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64) buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3) # alias buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0) # alias buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1) # alias buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2) # alias # Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather] triton_poi_fused_add_gather_max_3.run(buf4, arg0_1, arg3_1, buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, grid=grid(4), stream=stream0) del arg0_1 del arg3_1 del buf1 del buf3 del buf4 del buf5 return (buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
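# --- Illustrative sketch (added; `viterbi_step` is a hypothetical name) ---
# For seq_len = 4 the compiler fully unrolls the Viterbi recursion: kernels
# triton_poi_fused_add_max_0/1/2 each perform one forward step of
#     v, idx = (v.unsqueeze(-1) + transitions).max(1)
# (with the step's emission scores added), and triton_poi_fused_add_gather_max_3
# takes the final max and chains the three gathers that backtrack the best
# path. One forward step in eager PyTorch:
import torch


def viterbi_step(v, transitions, feat):
    # v: [batch, num_tags]; transitions: [num_tags, num_tags];
    # feat: [batch, num_tags] emission scores at the current position.
    scores, idx = (v.unsqueeze(-1) + transitions.unsqueeze(0)).max(1)
    return scores + feat, idx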
import torch
import torch.nn as nn


class CRF(nn.Module):
    """
    Implements Conditional Random Fields that can be trained via
    backpropagation.
    """

    def __init__(self, num_tags):
        super(CRF, self).__init__()
        self.num_tags = num_tags
        self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
        self.start_transitions = nn.Parameter(torch.randn(num_tags))
        self.stop_transitions = nn.Parameter(torch.randn(num_tags))
        nn.init.xavier_normal_(self.transitions)

    def forward(self, feats):
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d, got {}-d'.format(len(feats.shape)))
        return self._viterbi(feats)

    def loss(self, feats, tags):
        """
        Computes the negative log likelihood between features and tags:
        the difference between the individual sequence score and the sum
        over all possible sequence scores (the partition function).

        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length].
                Should be between 0 and num_tags
        Returns:
            Negative log likelihood [a scalar]
        """
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d, got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d, got {}-d'.format(len(tags.shape)))
        if feats.shape[:2] != tags.shape:
            raise ValueError(
                'First two dimensions of feats and tags must match, got {} and {}'
                .format(feats.shape, tags.shape))
        sequence_score = self._sequence_score(feats, tags)
        partition_function = self._partition_function(feats)
        log_probability = sequence_score - partition_function
        return -log_probability.mean()

    def _sequence_score(self, feats, tags):
        """
        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length].
                Should be between 0 and num_tags
        Returns:
            Sequence score of shape [batch size]
        """
        # Emission scores of the gold tags, summed over the sequence.
        feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
        # Transition scores between consecutive gold tags.
        tags_pairs = tags.unfold(1, 2, 1)
        indices = tags_pairs.permute(2, 0, 1).chunk(2)
        trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
        start_score = self.start_transitions[tags[:, 0]]
        stop_score = self.stop_transitions[tags[:, -1]]
        return feat_score + start_score + trans_score + stop_score

    def _partition_function(self, feats):
        """
        Computes the partition function for the CRF using the forward
        algorithm, i.e. the total score over all possible tag sequences
        for the given feature vector sequence.

        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
        Returns:
            Total scores of shape [batch size]
        """
        _, seq_size, num_tags = feats.shape
        if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
        a = feats[:, 0] + self.start_transitions.unsqueeze(0)
        transitions = self.transitions.unsqueeze(0)
        for i in range(1, seq_size):
            feat = feats[:, i].unsqueeze(1)
            a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
        return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)

    def _viterbi(self, feats):
        """
        Uses the Viterbi algorithm to predict the best tag sequence.

        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
        Returns:
            Best tag sequence [batch size, sequence length]
        """
        _, seq_size, num_tags = feats.shape
        if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
        v = feats[:, 0] + self.start_transitions.unsqueeze(0)
        transitions = self.transitions.unsqueeze(0)
        paths = []
        for i in range(1, seq_size):
            feat = feats[:, i]
            v, idx = (v.unsqueeze(-1) + transitions).max(1)
            paths.append(idx)
            v = v + feat
        v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
        tags = [tag]
        for idx in reversed(paths):
            tag = idx.gather(1, tag)
            tags.append(tag)
        tags.reverse()
        return torch.cat(tags, 1)

    def _log_sum_exp(self, logits, dim):
        """
        Computes log-sum-exp in a numerically stable way.
        """
        max_val, _ = logits.max(dim)
        return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_tags': 4}]
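# --- Usage sketch (added; illustrative shapes) ---
# Training uses `loss`, inference uses `forward` (Viterbi decoding).
import torch

crf = CRF(num_tags=4)
feats = torch.rand(2, 5, 4)            # [batch, seq_len, num_tags]
tags = torch.randint(0, 4, (2, 5))     # gold tag indices
nll = crf.loss(feats, tags)            # scalar negative log likelihood
nll.backward()                         # gradients flow into the CRF parameters
best_paths = crf(feats)                # [batch, seq_len] predicted tag indices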
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = triton_helpers.maximum(tmp5, tmp11) tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp27 = tmp5 > tmp11 tmp28 = tmp5 == tmp11 tmp29 = tmp5 != tmp5 tmp30 = tmp11 != tmp11 tmp31 = tmp29 > tmp30 tmp32 = tmp27 | tmp31 tmp33 = tmp29 & tmp30 tmp34 = tmp28 | tmp33 tmp35 = tl.full([1], 0, tl.int64) tmp36 = tl.full([1], 1, tl.int64) tmp37 = tmp35 < tmp36 tmp38 = tmp34 & tmp37 tmp39 = tmp32 | tmp38 tmp40 = tl.where(tmp39, tmp5, tmp11) tmp41 = tl.where(tmp39, tmp35, tmp36) tmp42 = tmp40 > tmp18 tmp43 = tmp40 == tmp18 tmp44 = tmp40 != tmp40 tmp45 = tmp18 != tmp18 tmp46 = tmp44 > tmp45 tmp47 = tmp42 | tmp46 tmp48 = tmp44 & tmp45 tmp49 = tmp43 | tmp48 tmp50 = tl.full([1], 2, tl.int64) tmp51 = tmp41 < tmp50 tmp52 = tmp49 & tmp51 tmp53 = tmp47 | tmp52 tmp54 = tl.where(tmp53, tmp40, tmp18) tmp55 = tl.where(tmp53, tmp41, tmp50) tmp56 = tmp54 > tmp25 tmp57 = tmp54 == tmp25 tmp58 = tmp54 != tmp54 tmp59 = tmp25 != tmp25 tmp60 = tmp58 > tmp59 tmp61 = tmp56 | tmp60 tmp62 = tmp58 & tmp59 tmp63 = tmp57 | tmp62 tmp64 = tl.full([1], 3, tl.int64) tmp65 = tmp55 < tmp64 tmp66 = tmp63 & tmp65 tmp67 = tmp61 | tmp66 tl.where(tmp67, tmp54, tmp25) tmp69 = tl.where(tmp67, tmp55, tmp64) tl.store(out_ptr0 + x2, tmp26, xmask) tl.store(out_ptr1 + x2, tmp69, xmask) @triton.jit def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') 
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 > tmp9 tmp24 = tmp4 == tmp9 tmp25 = tmp4 != tmp4 tmp26 = tmp9 != tmp9 tmp27 = tmp25 > tmp26 tmp28 = tmp23 | tmp27 tmp29 = tmp25 & tmp26 tmp30 = tmp24 | tmp29 tmp31 = tl.full([1], 0, tl.int64) tmp32 = tl.full([1], 1, tl.int64) tmp33 = tmp31 < tmp32 tmp34 = tmp30 & tmp33 tmp35 = tmp28 | tmp34 tmp36 = tl.where(tmp35, tmp4, tmp9) tmp37 = tl.where(tmp35, tmp31, tmp32) tmp38 = tmp36 > tmp15 tmp39 = tmp36 == tmp15 tmp40 = tmp36 != tmp36 tmp41 = tmp15 != tmp15 tmp42 = tmp40 > tmp41 tmp43 = tmp38 | tmp42 tmp44 = tmp40 & tmp41 tmp45 = tmp39 | tmp44 tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp37 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp43 | tmp48 tmp50 = tl.where(tmp49, tmp36, tmp15) tmp51 = tl.where(tmp49, tmp37, tmp46) tmp52 = tmp50 > tmp21 tmp53 = tmp50 == tmp21 tmp54 = tmp50 != tmp50 tmp55 = tmp21 != tmp21 tmp56 = tmp54 > tmp55 tmp57 = tmp52 | tmp56 tmp58 = tmp54 & tmp55 tmp59 = tmp53 | tmp58 tmp60 = tl.full([1], 3, tl.int64) tmp61 = tmp51 < tmp60 tmp62 = tmp59 & tmp61 tmp63 = tmp57 | tmp62 tl.where(tmp63, tmp50, tmp21) tmp65 = tl.where(tmp63, tmp51, tmp60) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp65, xmask) @triton.jit def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 
tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 > tmp9 tmp24 = tmp4 == tmp9 tmp25 = tmp4 != tmp4 tmp26 = tmp9 != tmp9 tmp27 = tmp25 > tmp26 tmp28 = tmp23 | tmp27 tmp29 = tmp25 & tmp26 tmp30 = tmp24 | tmp29 tmp31 = tl.full([1], 0, tl.int64) tmp32 = tl.full([1], 1, tl.int64) tmp33 = tmp31 < tmp32 tmp34 = tmp30 & tmp33 tmp35 = tmp28 | tmp34 tmp36 = tl.where(tmp35, tmp4, tmp9) tmp37 = tl.where(tmp35, tmp31, tmp32) tmp38 = tmp36 > tmp15 tmp39 = tmp36 == tmp15 tmp40 = tmp36 != tmp36 tmp41 = tmp15 != tmp15 tmp42 = tmp40 > tmp41 tmp43 = tmp38 | tmp42 tmp44 = tmp40 & tmp41 tmp45 = tmp39 | tmp44 tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp37 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp43 | tmp48 tmp50 = tl.where(tmp49, tmp36, tmp15) tmp51 = tl.where(tmp49, tmp37, tmp46) tmp52 = tmp50 > tmp21 tmp53 = tmp50 == tmp21 tmp54 = tmp50 != tmp50 tmp55 = tmp21 != tmp21 tmp56 = tmp54 > tmp55 tmp57 = tmp52 | tmp56 tmp58 = tmp54 & tmp55 tmp59 = tmp53 | tmp58 tmp60 = tl.full([1], 3, tl.int64) tmp61 = tmp51 < tmp60 tmp62 = tmp59 & tmp61 tmp63 = tmp57 | tmp62 tl.where(tmp63, tmp50, tmp21) tmp65 = tl.where(tmp63, tmp51, tmp60) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp65, xmask) @triton.jit def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + 1) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr2 + 2) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp47 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr2 + 3) tmp51 = tl.broadcast_to(tmp50, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 + tmp4 tmp8 = tmp6 + tmp7 tmp11 = tmp8 + tmp10 tmp12 = tmp5 > tmp11 tmp13 = tmp5 == tmp11 tmp14 = tmp5 != tmp5 tmp15 = tmp11 != tmp11 tmp16 = tmp14 > tmp15 tmp17 = tmp12 | tmp16 tmp18 = tmp14 & tmp15 tmp19 = tmp13 | tmp18 tmp20 = tl.full([1], 0, tl.int64) tmp21 = tl.full([1], 1, tl.int64) tmp22 = tmp20 < tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 | tmp23 tmp25 = tl.where(tmp24, tmp5, tmp11) tmp26 = tl.where(tmp24, tmp20, tmp21) tmp29 = tmp27 + tmp28 tmp32 = tmp29 + tmp31 tmp33 = tmp25 > tmp32 tmp34 = tmp25 == tmp32 tmp35 = tmp25 != tmp25 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 2, tl.int64) tmp42 = tmp26 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp25, tmp32) tmp46 = tl.where(tmp44, tmp26, tmp41) tmp49 = tmp47 + tmp48 tmp52 = tmp49 + tmp51 tmp53 = tmp45 > tmp52 tmp54 = tmp45 == tmp52 tmp55 = tmp45 != tmp45 tmp56 = tmp52 != tmp52 tmp57 = tmp55 > tmp56 tmp58 = tmp53 | tmp57 tmp59 = tmp55 & tmp56 tmp60 = tmp54 | tmp59 tmp61 = tl.full([1], 3, tl.int64) tmp62 = tmp46 < 
tmp61 tmp63 = tmp60 & tmp62 tmp64 = tmp58 | tmp63 tl.where(tmp64, tmp45, tmp52) tmp66 = tl.where(tmp64, tmp46, tmp61) tmp67 = tl.full([XBLOCK], 4, tl.int32) tmp68 = tmp66 + tmp67 tmp69 = tmp66 < 0 tmp70 = tl.where(tmp69, tmp68, tmp66) tl.device_assert((0 <= tmp70) & (tmp70 < 4) | ~xmask, 'index out of bounds: 0 <= tmp70 < 4') tmp72 = tl.load(in_ptr3 + (tmp70 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp73 = tmp72 + tmp67 tmp74 = tmp72 < 0 tmp75 = tl.where(tmp74, tmp73, tmp72) tl.device_assert((0 <= tmp75) & (tmp75 < 4) | ~xmask, 'index out of bounds: 0 <= tmp75 < 4') tmp77 = tl.load(in_ptr4 + (tmp75 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp78 = tmp77 + tmp67 tmp79 = tmp77 < 0 tmp80 = tl.where(tmp79, tmp78, tmp77) tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask, 'index out of bounds: 0 <= tmp80 < 4') tmp82 = tl.load(in_ptr5 + (tmp80 + 4 * x0), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + 4 * x0, tmp66, xmask) tl.store(out_ptr1 + 4 * x0, tmp82, xmask) tl.store(out_ptr2 + 4 * x0, tmp77, xmask) tl.store(out_ptr3 + 4 * x0, tmp72, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_add_max_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused_add_max_1[grid(16)](buf0, arg0_1, arg2_1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf0 del buf0 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused_add_max_2[grid(16)](buf2, arg0_1, arg2_1, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg2_1 del buf2 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64) buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3) buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0) buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1) buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2) triton_poi_fused_add_gather_max_3[grid(4)](buf4, arg0_1, arg3_1, buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 del arg3_1 del buf1 del buf3 del buf4 del buf5 return buf10, class CRFNew(nn.Module): """ Implements Conditional Random Fields that can be trained via backpropagation. """ def __init__(self, num_tags): super(CRFNew, self).__init__() self.num_tags = num_tags self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags)) self.start_transitions = nn.Parameter(torch.randn(num_tags)) self.stop_transitions = nn.Parameter(torch.randn(num_tags)) nn.init.xavier_normal_(self.transitions) def loss(self, feats, tags): """ Computes negative log likelihood between features and tags. Essentially difference between individual sequence scores and sum of all possible sequence scores (partition function) Parameters: feats: Input features [batch size, sequence length, number of tags] tags: Target tag indices [batch size, sequence length]. 
Should be between 0 and num_tags Returns: Negative log likelihood [a scalar] """ if len(feats.shape) != 3: raise ValueError('feats must be 3-d got {}-d'.format(feats.shape)) if len(tags.shape) != 2: raise ValueError('tags must be 2-d but got {}-d'.format(tags.shape) ) if feats.shape[:2] != tags.shape: raise ValueError( 'First two dimensions of feats and tags must match ', feats .shape, tags.shape) sequence_score = self._sequence_score(feats, tags) partition_function = self._partition_function(feats) log_probability = sequence_score - partition_function return -log_probability.mean() def _sequence_score(self, feats, tags): """ Parameters: feats: Input features [batch size, sequence length, number of tags] tags: Target tag indices [batch size, sequence length]. Should be between 0 and num_tags Returns: Sequence score of shape [batch size] """ feats.shape[0] feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1 ) tags_pairs = tags.unfold(1, 2, 1) indices = tags_pairs.permute(2, 0, 1).chunk(2) trans_score = self.transitions[indices].squeeze(0).sum(dim=-1) start_score = self.start_transitions[tags[:, 0]] stop_score = self.stop_transitions[tags[:, -1]] return feat_score + start_score + trans_score + stop_score def _partition_function(self, feats): """ Computes the partitition function for CRF using the forward algorithm. Basically calculate scores for all possible tag sequences for the given feature vector sequence Parameters: feats: Input features [batch size, sequence length, number of tags] Returns: Total scores of shape [batch size] """ _, seq_size, num_tags = feats.shape if self.num_tags != num_tags: raise ValueError('num_tags should be {} but got {}'.format(self .num_tags, num_tags)) a = feats[:, 0] + self.start_transitions.unsqueeze(0) transitions = self.transitions.unsqueeze(0) for i in range(1, seq_size): feat = feats[:, i].unsqueeze(1) a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1) return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1) def _viterbi(self, feats): """ Uses Viterbi algorithm to predict the best sequence Parameters: feats: Input features [batch size, sequence length, number of tags] Returns: Best tag sequence [batch size, sequence length] """ _, seq_size, num_tags = feats.shape if self.num_tags != num_tags: raise ValueError('num_tags should be {} but got {}'.format(self .num_tags, num_tags)) v = feats[:, 0] + self.start_transitions.unsqueeze(0) transitions = self.transitions.unsqueeze(0) paths = [] for i in range(1, seq_size): feat = feats[:, i] v, idx = (v.unsqueeze(-1) + transitions).max(1) paths.append(idx) v = v + feat v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True) tags = [tag] for idx in reversed(paths): tag = idx.gather(1, tag) tags.append(tag) tags.reverse() return torch.cat(tags, 1) def _log_sum_exp(self, logits, dim): """ Computes log-sum-exp in a stable way """ max_val, _ = logits.max(dim) return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log() def forward(self, input_0): arg2_1 = self.transitions arg1_1 = self.start_transitions arg3_1 = self.stop_transitions arg0_1 = input_0 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
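# --- Parity-check sketch (added; hypothetical, needs a CUDA device and both
# the eager CRF and compiled CRFNew classes in scope) ---
# With shared parameters, the fused decode should agree with the eager
# Viterbi implementation on the (4, 4, 4) shapes the kernels were traced for.
import torch

if torch.cuda.is_available():
    eager = CRF(4).cuda()
    fused = CRFNew(4).cuda()
    fused.load_state_dict(eager.state_dict())
    feats = torch.rand(4, 4, 4, device='cuda')
    assert torch.equal(fused(feats), eager(feats))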
jbogensperger/DRUG_CROSSNER
CRF
false
10224
[ "MIT" ]
0
c82fc4ce6fd6229b48d28bafffe38f5ea3dcd6aa
https://github.com/jbogensperger/DRUG_CROSSNER/tree/c82fc4ce6fd6229b48d28bafffe38f5ea3dcd6aa
BertLastCLSModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/hf/chfhlbeyguxis6dfw6oxmvepl6oti2drna65mesvwt65iketca7n.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select_1,), kwargs = {}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (192 + x0 + (16*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
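# --- Illustrative sketch (added; variable names are hypothetical) ---
# The constant offset 192 in triton_poi_fused_clone_0 is input[-1][:, 0, :]
# resolved against the (4, 4, 4, 4) input with strides (64, 16, 4, 1):
# index 3 along dim 0 contributes 3 * 64 = 192, the CLS position 0 along
# dim 2 contributes nothing, and (x0 + 16 * x1) walks dims 3 and 1.
import torch

t = torch.arange(256).reshape(4, 4, 4, 4)
flat = t.flatten()
rebuilt = torch.stack([flat[192 + torch.arange(4) + 16 * i] for i in range(4)])
assert torch.equal(rebuilt, t[-1][:, 0, :])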
import torch
from torch import nn


class BertLastCLSModule(nn.Module):

    def __init__(self, dropout_prob=0.0):
        super().__init__()
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, input):
        last_hidden = input[-1][:, 0, :]
        out = self.dropout(last_hidden)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
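# --- Usage sketch (added; illustrative shapes) ---
# In its intended setting `input` is the tuple of per-layer hidden states a
# BERT encoder returns; the module keeps only the last layer's [CLS] vector.
module = BertLastCLSModule(dropout_prob=0.1)
hidden_states = tuple(torch.rand(2, 8, 16) for _ in range(12))  # 12 layers
cls = module(hidden_states)
assert cls.shape == (2, 16)  # [batch, hidden] from position 0 of the last layer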
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (192 + x0 + 16 * x1), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class BertLastCLSModuleNew(nn.Module):

    def __init__(self, dropout_prob=0.0):
        super().__init__()
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
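# --- Parity-check sketch (added; hypothetical, needs a CUDA device) ---
# With dropout disabled (p=0.0, the default), the fused clone should equal
# the eager slice on the (4, 4, 4, 4) input the kernel was traced for.
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.equal(BertLastCLSModuleNew()(x), BertLastCLSModule()(x))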
jdunnmon/emmental-tutorials
BertLastCLSModule
false
10225
[ "MIT" ]
0
2aa6c86e2e74943fbf75f4df1e70c5b8614c6c49
https://github.com/jdunnmon/emmental-tutorials/tree/2aa6c86e2e74943fbf75f4df1e70c5b8614c6c49
SelfGating
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/32/c32gpnu7y6kwawwiknabqcyafcipv27fjg22cpx6wzdxmd52bm4o.py # Topologically Sorted Source Nodes: [spatiotemporal_average], Original ATen: [aten.mean] # Source node to ATen node mapping: # spatiotemporal_average => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3, 4]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex 
x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c2/cc26hd4oq32vrxgrqqakqqnzwxv6d6ovzdl26r33utpsmw442x3p.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %primals_1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [spatiotemporal_average], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 64, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf2, primals_1, buf3, 1024, grid=grid(1024), stream=stream0) return (buf3, primals_1, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch as th import torch.nn as nn class SelfGating(nn.Module): def __init__(self, input_dim): super(SelfGating, self).__init__() self.fc = nn.Linear(input_dim, input_dim) def forward(self, input_tensor): """Feature gating as used in S3D-G. """ spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4]) weights = self.fc(spatiotemporal_average) weights = th.sigmoid(weights) return weights[:, :, None, None, None] * input_tensor def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_1[grid(1024)](buf2, primals_1, buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, buf1, buf2 class SelfGatingNew(nn.Module): def __init__(self, input_dim): super(SelfGatingNew, self).__init__() self.fc = nn.Linear(input_dim, input_dim) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
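A minimal equivalence sketch (illustrative, not part of the dataset): the two kernels above split the gating into a persistent mean reduction over dims [2, 3, 4] and a fused sigmoid-multiply, with the Linear layer dispatched to an extern addmm. Assuming the eager and compiled classes above share one module scope and a CUDA device, copying the fc weights should make the outputs match; the helper name is hypothetical.

import torch

def check_self_gating():
    torch.manual_seed(0)
    gate = SelfGating(input_dim=4).cuda()
    gate_new = SelfGatingNew(input_dim=4).cuda()
    gate_new.load_state_dict(gate.state_dict())  # identical fc weight and bias
    x = torch.rand([4, 4, 4, 4, 4], device='cuda')
    assert torch.allclose(gate(x), gate_new(x), atol=1e-6)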
inbalcroitoru/Information-retrieval-Audio-retrieval-with-text-queries
SelfGating
false
10226
[ "Apache-2.0" ]
0
d98ee159c61a8a9a1c433f0bfed14e7005215d5f
https://github.com/inbalcroitoru/Information-retrieval-Audio-retrieval-with-text-queries/tree/d98ee159c61a8a9a1c433f0bfed14e7005215d5f
QLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/em/cemxfn6bdnqnwevpuuy2xwvsj3u7tsm43nox4vwe6hoin3yj4ce5.py # Topologically Sorted Source Nodes: [quantized_weight], Original ATen: [aten.sign, aten.mul] # Source node to ATen node mapping: # quantized_weight => mul, sign # Graph fragment: # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sign), kwargs = {}) triton_poi_fused_mul_sign_0 = async_compile.triton('triton_poi_fused_mul_sign_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sign_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sign_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, 
[XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp3 < tmp2 tmp5 = tmp4.to(tl.int8) tmp6 = tmp2 < tmp3 tmp7 = tmp6.to(tl.int8) tmp8 = tmp5 - tmp7 tmp9 = tmp8.to(tmp2.dtype) tmp10 = tmp1 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [quantized_weight], Original ATen: [aten.sign, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sign_0.run(primals_2, primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor import torch.nn as nn import torch.nn.functional as F import torch.autograd as A from torch.autograd.function import once_differentiable from torch.nn.parameter import Parameter import torch.nn.parallel import torch.optim import torch.utils.data class WeightQuantization(A.Function): @staticmethod def forward(ctx, weight: 'Tensor', alpha: 'Tensor') ->Tensor: ctx.save_for_backward(weight, alpha) return alpha * weight.sign() @staticmethod @once_differentiable def backward(ctx, grad_output: 'Tensor') ->Tensor: weight, alpha = ctx.saved_tensors grad_input = alpha * grad_output grad_alpha = grad_output * weight.sign() grad_alpha.unsqueeze_(0) return grad_input, grad_alpha class QLinear(nn.Linear): def __init__(self, *args, **kwargs) ->None: super().__init__(*args, **kwargs) self.alpha = Parameter(torch.ones(1)) def forward(self, input: 'Tensor') ->Tensor: quantized_weight = WeightQuantization.apply(self.weight, self.alpha) return F.linear(input, quantized_weight, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import Tensor import torch.nn as nn import torch.autograd as A from torch.autograd.function import once_differentiable from torch.nn.parameter import Parameter import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sign_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp3 < tmp2 tmp5 = tmp4.to(tl.int8) tmp6 = tmp2 < tmp3 tmp7 = tmp6.to(tl.int8) tmp8 = tmp5 - tmp7 tmp9 = tmp8.to(tmp2.dtype) tmp10 = tmp1 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sign_0[grid(16)](primals_2, primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0) class WeightQuantization(A.Function): @staticmethod def forward(ctx, weight: 'Tensor', alpha: 'Tensor') ->Tensor: ctx.save_for_backward(weight, alpha) return alpha * weight.sign() @staticmethod @once_differentiable def backward(ctx, grad_output: 'Tensor') ->Tensor: weight, alpha = ctx.saved_tensors grad_input = alpha * grad_output grad_alpha = grad_output * weight.sign() grad_alpha.unsqueeze_(0) return grad_input, grad_alpha class QLinearNew(nn.Linear): def __init__(self, *args, **kwargs) ->None: super().__init__(*args, **kwargs) self.alpha = Parameter(torch.ones(1)) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = self.alpha primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
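For the forward path, the quantization here is simply F.linear against alpha * weight.sign(), which is what the fused sign/mul kernel plus extern addmm compute. The sketch below (illustrative; helper name hypothetical; CPU suffices since only the eager class is used) checks that identity. Note that the custom backward returns grad_alpha shaped like the weight rather than like the scalar alpha, so a gradient check of this record would need care; the sketch deliberately stays in the forward pass.

import torch
import torch.nn.functional as F

def check_qlinear_forward():
    torch.manual_seed(0)
    layer = QLinear(4, 4)
    x = torch.rand([4, 4, 4, 4])
    # The quantized weight is the sign pattern of W, scaled by the learned alpha.
    ref = F.linear(x, layer.alpha * layer.weight.sign(), layer.bias)
    assert torch.allclose(layer(x), ref)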
i207M/pytorch-cifar
QLinear
false
10227
[ "MIT" ]
0
df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
https://github.com/i207M/pytorch-cifar/tree/df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
FusionLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/dw/cdwapf4spszv7xrakefo7xfxxiomqve2jhc3fgichvmsioyx2bqg.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] # Source node to ATen node mapping: # z => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %mul, %sub], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tmp21 = tl.full([1], 16, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tl.load(in_ptr0 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/u2/cu2egzqq3ivzxp5v5n7o6veft7jfxb4p5it5arzllbv5rrtioyeh.py # Topologically Sorted Source Nodes: [gated, fusion, mul_1, sub_1, mul_2, add], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # add => add # fusion => tanh # gated => sigmoid # mul_1 => mul_1 # mul_2 => mul_2 # sub_1 => sub_1 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp7 = tl.load(in_ptr2 + (x2), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 16), (16, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_3 del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf3) del primals_5 del primals_6 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gated, fusion, mul_1, sub_1, mul_2, add], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.rsub, aten.add] triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf2, buf3, primals_1, buf4, 64, grid=grid(64), stream=stream0) return (buf4, primals_1, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == 
"__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FusionLayer(nn.Module): """ vector based fusion m(x, y) = W([x, y, x * y, x - y]) + b g(x, y) = w([x, y, x * y, x - y]) + b :returns g(x, y) * m(x, y) + (1 - g(x, y)) * x """ def __init__(self, input_dim): super(FusionLayer, self).__init__() self.linear_f = nn.Linear(input_dim * 4, input_dim, bias=True) self.linear_g = nn.Linear(input_dim * 4, 1, bias=True) self.tanh = nn.Tanh() self.sigmoid = nn.Sigmoid() def forward(self, x, y): z = torch.cat([x, y, x * y, x - y], dim=2) gated = self.sigmoid(self.linear_g(z)) fusion = self.tanh(self.linear_f(z)) return gated * fusion + (1 - gated) * x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 16), (16, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_3 
del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf3) del primals_5 del primals_6 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(64)](buf2, buf3, primals_1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf4, primals_1, reinterpret_tensor(buf0, (16, 16), (16, 1), 0 ), buf2, buf3 class FusionLayerNew(nn.Module): """ vector based fusion m(x, y) = W([x, y, x * y, x - y]) + b g(x, y) = w([x, y, x * y, x - y]) + b :returns g(x, y) * m(x, y) + (1 - g(x, y)) * x """ def __init__(self, input_dim): super(FusionLayerNew, self).__init__() self.linear_f = nn.Linear(input_dim * 4, input_dim, bias=True) self.linear_g = nn.Linear(input_dim * 4, 1, bias=True) self.tanh = nn.Tanh() self.sigmoid = nn.Sigmoid() def forward(self, input_0, input_1): primals_5 = self.linear_f.weight primals_6 = self.linear_f.bias primals_3 = self.linear_g.weight primals_4 = self.linear_g.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
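A worked restatement of the docstring's formula (illustrative; the helper name is hypothetical and CPU suffices, since only the eager class is used): z concatenates [x, y, x*y, x-y] into a 4*input_dim feature, linear_g maps it to a scalar gate g per position, linear_f to the fused vector m, and the output interpolates g*m + (1-g)*x.

import torch

def check_fusion_layer():
    torch.manual_seed(0)
    fuse = FusionLayer(input_dim=4)
    x, y = torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
    z = torch.cat([x, y, x * y, x - y], dim=2)   # (4, 4, 16)
    g = torch.sigmoid(fuse.linear_g(z))          # (4, 4, 1): scalar gate, broadcast
    m = torch.tanh(fuse.linear_f(z))             # (4, 4, 4): fused candidate
    assert torch.allclose(fuse(x, y), g * m + (1 - g) * x)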
hgrhgy/NumSeq2SQL
FusionLayer
false
10228
[ "MIT" ]
0
6f22fdf108736f979afa2dbd3af14aa9ad4718aa
https://github.com/hgrhgy/NumSeq2SQL/tree/6f22fdf108736f979afa2dbd3af14aa9ad4718aa
QConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/y3/cy3t7teiqscujrva4gmc73iumyeulw2nodjzb7uhaqryzuqxboyk.py # Topologically Sorted Source Nodes: [quantized_weight], Original ATen: [aten.sign, aten.mul] # Source node to ATen node mapping: # quantized_weight => mul, sign # Graph fragment: # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sign), kwargs = {}) triton_poi_fused_mul_sign_0 = async_compile.triton('triton_poi_fused_mul_sign_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sign_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sign_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = 
tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp3 < tmp2 tmp5 = tmp4.to(tl.int8) tmp6 = tmp2 < tmp3 tmp7 = tmp6.to(tl.int8) tmp8 = tmp5 - tmp7 tmp9 = tmp8.to(tmp2.dtype) tmp10 = tmp1 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [quantized_weight], Original ATen: [aten.sign, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sign_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 
1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 return (buf2, primals_1, primals_2, primals_4, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor import torch.nn as nn import torch.autograd as A from torch.autograd.function import once_differentiable from torch.nn.parameter import Parameter import torch.nn.parallel import torch.optim import torch.utils.data class WeightQuantization(A.Function): @staticmethod def forward(ctx, weight: 'Tensor', alpha: 'Tensor') ->Tensor: ctx.save_for_backward(weight, alpha) return alpha * weight.sign() @staticmethod @once_differentiable def backward(ctx, grad_output: 'Tensor') ->Tensor: weight, alpha = ctx.saved_tensors grad_input = alpha * grad_output grad_alpha = grad_output * weight.sign() grad_alpha.unsqueeze_(0) return grad_input, grad_alpha class QConv2d(nn.Conv2d): def __init__(self, *args, **kwargs) ->None: super().__init__(*args, **kwargs) self.alpha = Parameter(torch.ones(1)) def forward(self, input: 'Tensor') ->Tensor: quantized_weight = WeightQuantization.apply(self.weight, self.alpha) return self._conv_forward(input, quantized_weight, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import Tensor import torch.nn as nn import torch.autograd as A from torch.autograd.function import once_differentiable from torch.nn.parameter import Parameter import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sign_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp3 < tmp2 tmp5 = tmp4.to(tl.int8) tmp6 = tmp2 < tmp3 tmp7 = tmp6.to(tl.int8) tmp8 = tmp5 - tmp7 tmp9 = tmp8.to(tmp2.dtype) tmp10 = tmp1 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sign_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf2, primals_1, primals_2, primals_4, buf0 class WeightQuantization(A.Function): @staticmethod def forward(ctx, weight: 'Tensor', alpha: 'Tensor') ->Tensor: ctx.save_for_backward(weight, alpha) return alpha * weight.sign() @staticmethod @once_differentiable def backward(ctx, grad_output: 'Tensor') ->Tensor: weight, alpha = ctx.saved_tensors grad_input = alpha * grad_output grad_alpha = grad_output * weight.sign() grad_alpha.unsqueeze_(0) return grad_input, grad_alpha class QConv2dNew(nn.Conv2d): def __init__(self, *args, **kwargs) ->None: super().__init__(*args, **kwargs) self.alpha = Parameter(torch.ones(1)) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = self.alpha primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
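The same sign-times-alpha quantization as the QLinear record above, now feeding an extern convolution plus a small fused bias-add kernel. An illustrative forward-only check (helper name hypothetical; CPU suffices for the eager class):

import torch
import torch.nn.functional as F

def check_qconv2d_forward():
    torch.manual_seed(0)
    conv = QConv2d(4, 4, kernel_size=4)
    x = torch.rand([4, 4, 4, 4])
    ref = F.conv2d(x, conv.alpha * conv.weight.sign(), conv.bias)  # (4, 4, 1, 1)
    assert torch.allclose(conv(x), ref)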
i207M/pytorch-cifar
QConv2d
false
10229
[ "MIT" ]
0
df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
https://github.com/i207M/pytorch-cifar/tree/df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/bs/cbstxeghddltznr7shuzsnth6ngv6mnftr2w7pqzzm5flm72plbl.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.convolution] # Source node to ATen node mapping: # e => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/to/ctohnag3s2my72gfje47knfyigkawenmq4hwyddhmhls6qicb3io.py # Topologically Sorted Source Nodes: [e, expanded_q, add, tanh], Original ATen: [aten.convolution, aten.repeat, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # e => convolution # expanded_q => repeat # tanh => tanh # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 1, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, %convolution), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_convolution_repeat_tanh_1 = async_compile.triton('triton_poi_fused_add_convolution_repeat_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_repeat_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 4) % 4 x3 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp2 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr0 + (x4), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf1, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf2; del buf2 # reuse buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [e, expanded_q, add, tanh], Original ATen: [aten.convolution, aten.repeat, aten.add, aten.tanh] triton_poi_fused_add_convolution_repeat_tanh_1.run(buf3, primals_6, buf0, buf4, 64, grid=grid(64), stream=stream0) del primals_6 buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_7, (4, 1, 4), (0, 0, 1), 0), buf4, out=buf5) return (buf3, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), primals_4, primals_5, primals_7, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class Attention(nn.Module): """A generic attention module for a decoder in seq2seq""" def __init__(self, dim, use_tanh=False, C=10): super(Attention, self).__init__() self.use_tanh = use_tanh self.project_query = nn.Linear(dim, dim) self.project_ref = nn.Conv1d(dim, dim, 1, 1) self.C = C self.tanh = nn.Tanh() self.v = nn.Parameter(torch.FloatTensor(dim)) self.v.data.uniform_(-(1.0 / math.sqrt(dim)), 1.0 / math.sqrt(dim)) def forward(self, query, ref): """ Args: query: is the hidden state of the decoder at the current time step. batch x dim ref: the set of hidden states from the encoder. sourceL x batch x hidden_dim """ ref = ref.permute(1, 2, 0) q = self.project_query(query).unsqueeze(2) e = self.project_ref(ref) expanded_q = q.repeat(1, 1, e.size(2)) v_view = self.v.unsqueeze(0).expand(expanded_q.size(0), len(self.v) ).unsqueeze(1) u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1) if self.use_tanh: logits = self.C * self.tanh(u) else: logits = u return e, logits def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
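# Editor's note: a minimal usage sketch for the Attention module above, using
# the toy sizes from get_init_inputs()/get_inputs(). Illustrative only and not
# part of the original record; variable names follow the forward() docstring.
if __name__ == "__main__":
    attn = Attention(dim=4)
    query = torch.rand(4, 4)     # batch x dim
    ref = torch.rand(4, 4, 4)    # sourceL x batch x hidden_dim
    e, logits = attn(query, ref)
    print(e.shape)               # torch.Size([4, 4, 4]): batch x dim x sourceL
    print(logits.shape)          # torch.Size([4, 4]):    batch x sourceL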
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x3 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp2 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr0 + x4, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf1, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf2 del buf2 buf4 = buf1 del buf1 triton_poi_fused_add_convolution_repeat_tanh_1[grid(64)](buf3, primals_6, buf0, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(primals_7, (4, 1, 4), (0, 0, 1), 0), buf4, out=buf5) return buf3, reinterpret_tensor(buf5, (4, 4), (4, 1), 0 ), primals_4, primals_5, primals_7, reinterpret_tensor(primals_1, ( 4, 4, 4), (4, 1, 16), 0), buf4 class AttentionNew(nn.Module): """A generic attention module for a decoder in seq2seq""" def __init__(self, dim, use_tanh=False, C=10): super(AttentionNew, self).__init__() self.use_tanh = use_tanh self.project_query = nn.Linear(dim, dim) self.project_ref = nn.Conv1d(dim, dim, 
1, 1)
        self.C = C
        self.tanh = nn.Tanh()
        self.v = nn.Parameter(torch.FloatTensor(dim))
        self.v.data.uniform_(-(1.0 / math.sqrt(dim)), 1.0 / math.sqrt(dim))

    def forward(self, input_0, input_1):
        # Map parameters to the primals slots exactly as call() consumes them:
        # primals_3 is the addmm bias (project_query.bias), primals_6 the
        # per-channel conv bias (project_ref.bias), and primals_7 the bmm
        # vector (v). The shapes all coincide at (4,), so a wrong mapping
        # would run silently but produce incorrect values.
        primals_2 = self.project_query.weight
        primals_3 = self.project_query.bias
        primals_5 = self.project_ref.weight
        primals_6 = self.project_ref.bias
        primals_7 = self.v
        primals_4 = input_0
        primals_1 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
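# Editor's note: a hedged sanity-check sketch, not part of the generated
# artifact. It assumes a CUDA device and that the reference Attention class
# from the accompanying source listing is in scope; it copies the reference
# parameters into AttentionNew and compares outputs numerically. If the
# primals mapping in forward() ever diverges from how call() consumes the
# tensors, these asserts will expose it.
def _check_attention_equivalence(dim=4, atol=1e-5):
    torch.manual_seed(0)
    ref_mod = Attention(dim).cuda()
    new_mod = AttentionNew(dim).cuda()
    new_mod.load_state_dict(ref_mod.state_dict())
    query = torch.rand(4, dim, device='cuda')
    ref = torch.rand(4, 4, dim, device='cuda')
    e_ref, logits_ref = ref_mod(query, ref)
    e_new, logits_new = new_mod(query, ref)
    assert torch.allclose(e_ref, e_new, atol=atol)
    assert torch.allclose(logits_ref, logits_new, atol=atol)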
iamstevepaul/MRTA-Attention
Attention
false
10,230
[ "MIT" ]
0
fc177440f7354212c41ad02ef76fdda43cc0aa57
https://github.com/iamstevepaul/MRTA-Attention/tree/fc177440f7354212c41ad02ef76fdda43cc0aa57
AugCNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ur/curzudn4ai4j7lgrmbqwy57jpcw3gylwk4nkg6jt7lqh577w5ku7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 49152, grid=grid(49152), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def apply_init_(modules): """ Initialize NN modules """ for m in modules: if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) if m.bias is not None: nn.init.constant_(m.bias, 0) class Conv2d_tf(nn.Conv2d): """ Conv2d with the padding behavior from TF """ def __init__(self, *args, **kwargs): super(Conv2d_tf, self).__init__(*args, **kwargs) self.padding = kwargs.get('padding', 'SAME') def _compute_padding(self, input, dim): input_size = input.size(dim + 2) filter_size = self.weight.size(dim + 2) effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1 out_size = (input_size + self.stride[dim] - 1) // self.stride[dim] total_padding = max(0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size) additional_padding = int(total_padding % 2 != 0) return additional_padding, total_padding def forward(self, input): if self.padding == 'VALID': return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups) rows_odd, padding_rows = self._compute_padding(input, dim=0) cols_odd, padding_cols = self._compute_padding(input, dim=1) if rows_odd or cols_odd: input = F.pad(input, [0, cols_odd, 0, rows_odd]) return F.conv2d(input, self.weight, self.bias, self.stride, padding =(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups) class AugCNN(nn.Module): """ Convolutional Neural Network used as Augmentation """ def __init__(self): super(AugCNN, self).__init__() self.aug = Conv2d_tf(3, 3, kernel_size=3) apply_init_(self.modules()) self.train() def forward(self, obs): return self.aug(obs) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
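# Editor's note: a small illustration of the TF-style 'SAME' padding in
# Conv2d_tf above (toy sizes; not part of the original record). With stride 2
# on an even input, the total padding is odd, so one extra row/column is
# padded on the bottom/right before the convolution, and the output spatial
# size is ceil(input_size / stride).
if __name__ == "__main__":
    conv = Conv2d_tf(3, 3, kernel_size=3, stride=2)
    x = torch.rand(1, 3, 64, 64)
    print(conv(x).shape)  # torch.Size([1, 3, 32, 32])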
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152, XBLOCK=512, num_warps=4, num_stages=1) del primals_3 return buf1, primals_1, primals_2 def apply_init_(modules): """ Initialize NN modules """ for m in modules: if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) if m.bias is not None: nn.init.constant_(m.bias, 0) class Conv2d_tf(nn.Conv2d): """ Conv2d with the padding behavior from TF """ def __init__(self, *args, **kwargs): super(Conv2d_tf, self).__init__(*args, **kwargs) self.padding = kwargs.get('padding', 'SAME') def _compute_padding(self, input, dim): input_size = input.size(dim + 2) filter_size = self.weight.size(dim + 2) effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1 out_size = (input_size + self.stride[dim] - 1) // self.stride[dim] total_padding = max(0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size) additional_padding = int(total_padding % 2 != 0) return additional_padding, total_padding def forward(self, input): if self.padding == 'VALID': return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups) rows_odd, padding_rows = self._compute_padding(input, dim=0) cols_odd, padding_cols = self._compute_padding(input, dim=1) if rows_odd or cols_odd: input = F.pad(input, [0, cols_odd, 0, rows_odd]) return F.conv2d(input, self.weight, self.bias, self.stride, padding =(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups) class AugCNNNew(nn.Module): """ Convolutional Neural Network used as Augmentation """ def __init__(self): super(AugCNNNew, self).__init__() self.aug = Conv2d_tf(3, 3, kernel_size=3) apply_init_(self.modules()) self.train() def forward(self, input_0): primals_2 = self.aug.weight primals_3 = self.aug.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
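# Editor's note: a hedged CUDA sanity check, not part of the generated
# artifact; it assumes a GPU and that the reference AugCNN class from the
# accompanying source listing is in scope. The compiled path runs the
# convolution through extern_kernels and fuses only the per-channel bias add
# into triton_poi_fused_convolution_0.
def _check_augcnn_equivalence(atol=1e-5):
    torch.manual_seed(0)
    ref_mod = AugCNN().cuda()
    new_mod = AugCNNNew().cuda()
    new_mod.load_state_dict(ref_mod.state_dict())
    x = torch.rand(4, 3, 64, 64, device='cuda')
    assert torch.allclose(ref_mod(x), new_mod(x), atol=atol)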
jajajag/auto-drac
AugCNN
false
10,231
[ "MIT" ]
0
2241f9f5f10a4d863a8b9d198da1d39e5feb59a0
https://github.com/jajajag/auto-drac/tree/2241f9f5f10a4d863a8b9d198da1d39e5feb59a0
MeanAct
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/pi/cpi2cbijddvm6xxaxvg53d6uvk4igch2jlnfkqsndp2pn2s566k7.py # Topologically Sorted Source Nodes: [exp, clamp], Original ATen: [aten.exp, aten.clamp] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%exp, 1e-05), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1000000.0), kwargs = {}) triton_poi_fused_clamp_exp_0 = async_compile.triton('triton_poi_fused_clamp_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tmp2 = 1e-05 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = 1000000.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp, clamp], Original ATen: [aten.exp, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_exp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MeanAct(nn.Module): def __init__(self): super(MeanAct, self).__init__() def forward(self, x): return torch.clamp(torch.exp(x), min=1e-05, max=1000000.0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
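# Editor's note: a quick numeric illustration of the clamped exponential above
# (not part of the original record). Inputs below log(1e-05) saturate at the
# lower bound and inputs above log(1e+06) at the upper bound.
if __name__ == "__main__":
    act = MeanAct()
    x = torch.tensor([-20.0, 0.0, 20.0])
    print(act(x))  # tensor([1.0000e-05, 1.0000e+00, 1.0000e+06])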
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tmp2 = 1e-05 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = 1000000.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MeanActNew(nn.Module): def __init__(self): super(MeanActNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
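# Editor's note: a hedged check that the fused clamp-exp kernel matches eager
# PyTorch (assumes a CUDA device; the tolerance is arbitrary). Not part of the
# generated artifact.
def _check_meanact_equivalence(atol=1e-6):
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = torch.clamp(torch.exp(x), min=1e-05, max=1000000.0)
    fused = MeanActNew()(x)
    assert torch.allclose(eager, fused, atol=atol)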
jdasam/scDCC
MeanAct
false
10,232
[ "Apache-2.0" ]
0
8ebaed766db5ad56021983ebc13e9a60b6c7b453
https://github.com/jdasam/scDCC/tree/8ebaed766db5ad56021983ebc13e9a60b6c7b453
BatchDense
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/b3/cb35lfgojdtaj2fnq2zxtf4ul7ez7i5xdvfuqztkpfoiqg5brilt.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ln/clnksfognfphntlbdomwxccp6uch3hcmogn57h4lfbsscsff2v5a.py # 
Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # out_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/s3/cs3qietwuvtilqj2kdtaqahj7mazz7yw5ref32tvvmdaisydiyhq.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] # Source node to ATen node mapping: # Graph fragment: # %permute_3 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {}) triton_poi_fused_transpose_2 = async_compile.triton('triton_poi_fused_transpose_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_transpose_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_transpose_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.clone, aten.baddbmm] extern_kernels.bmm(buf0, primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_2, buf1, buf2, 64, grid=grid(64), stream=stream0) del primals_2 buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] triton_poi_fused_transpose_2.run(buf0, buf3, 64, grid=grid(64), stream=stream0) del buf0 return (reinterpret_tensor(buf2, (4, 16), (16, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter class BatchDense(nn.Module): def __init__(self, batch, in_features, out_features, bias_init=None): super(BatchDense, self).__init__() self.batch = batch self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(batch, in_features, out_features)) self.bias = Parameter(torch.Tensor(batch, 1, out_features)) self.reset_parameters(bias_init) def reset_parameters(self, bias_init=None): stdv = math.sqrt(6.0 / (self.in_features + self.out_features)) self.weight.data.uniform_(-stdv, stdv) if bias_init is not None: self.bias.data = torch.from_numpy(bias_init) else: self.bias.data.fill_(0) def forward(self, x): x.size() x = x.view(x.size(0), self.batch, -1) out = x.transpose(0, 1).contiguous() out = torch.baddbmm(self.bias, out, self.weight) out = out.transpose(0, 1).contiguous() out = out.view(x.size(0), -1) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'batch': 4, 'in_features': 4, 'out_features': 4}]
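# Editor's note: a shape walkthrough for BatchDense with the toy sizes from
# get_init_inputs() (illustrative only; not part of the original record). The
# input's feature axis is split into `batch` groups, each group is multiplied
# by its own (in_features x out_features) weight slice via baddbmm, and the
# result is flattened back to (N, batch * out_features).
if __name__ == "__main__":
    layer = BatchDense(batch=4, in_features=4, out_features=4)
    x = torch.rand(4, 4, 4)   # viewed internally as (N, batch, in_features)
    print(layer(x).shape)     # torch.Size([4, 16])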
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_transpose_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf0, primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](primals_2, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0) del buf1 triton_poi_fused_transpose_2[grid(64)](buf0, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return reinterpret_tensor(buf2, (4, 16), (16, 1), 0), buf3 class BatchDenseNew(nn.Module): def __init__(self, batch, in_features, out_features, bias_init=None): super(BatchDenseNew, self).__init__() self.batch = batch self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(batch, in_features, out_features)) self.bias = Parameter(torch.Tensor(batch, 1, out_features)) self.reset_parameters(bias_init) def reset_parameters(self, bias_init=None): stdv = math.sqrt(6.0 / (self.in_features + self.out_features)) self.weight.data.uniform_(-stdv, stdv) if bias_init is not None: self.bias.data = torch.from_numpy(bias_init) else: self.bias.data.fill_(0) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
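# Editor's note: a hedged CUDA sanity check, not part of the generated
# artifact; it assumes a GPU and that the reference BatchDense class from the
# accompanying source listing is in scope.
def _check_batchdense_equivalence(atol=1e-5):
    torch.manual_seed(0)
    ref_mod = BatchDense(4, 4, 4).cuda()
    new_mod = BatchDenseNew(4, 4, 4).cuda()
    new_mod.load_state_dict(ref_mod.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    assert torch.allclose(ref_mod(x), new_mod(x), atol=atol)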
iloncka/neurotrees
BatchDense
false
10,233
[ "MIT" ]
0
ddb52dc0e7ac1cf67a426b401ba06149807e03ec
https://github.com/iloncka/neurotrees/tree/ddb52dc0e7ac1cf67a426b401ba06149807e03ec
VAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fm/cfmlbw5yjdhed6beukq6zpdxc5zldkghuzoad5ynyyrfanj525wd.py # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] # Source node to ATen node mapping: # h1 => relu # Graph fragment: # %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = (xindex // 2000) tmp0 = tl.load(in_out_ptr0 + (x0 + (2016*x1)), xmask) tmp1 = 
tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0 + (2016*x1)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/d7/cd7rilnjd42cirsc5dhnnwlficmjz5omrtsdfojgouhplcpynn4n.py # Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu] # Source node to ATen node mapping: # h2 => relu_1 # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_5), kwargs = {}) # %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ex/cexwuc6up4cv5uxb46cafc3eelvzsyfhdhsot7lgpz5xv7tenoc2.py # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # std => exp # z => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, 0.5), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %addmm_2), kwargs = {}) triton_poi_fused_add_exp_mul_2 = async_compile.triton('triton_poi_fused_add_exp_mul_2', ''' 
import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/mg/cmghe67e45ehrx7vylemnjyumsq7otljf7kg5n7svjfjhvwv6zaa.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 17188 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4297 x1 = (xindex // 4297) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4320*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (4, 4297), (4297, 1)) assert_size_stride(primals_2, (2000, 4297), (4297, 1)) assert_size_stride(primals_3, (2000, ), (1, )) assert_size_stride(primals_4, (100, 2000), (2000, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (5, 100), (100, 1)) assert_size_stride(primals_7, (5, ), (1, )) assert_size_stride(primals_8, (5, 100), (100, 1)) assert_size_stride(primals_9, (5, ), (1, )) assert_size_stride(primals_10, (100, 5), (5, 1)) assert_size_stride(primals_11, (100, ), (1, )) assert_size_stride(primals_12, (2000, 100), (100, 1)) assert_size_stride(primals_13, (2000, ), (1, )) assert_size_stride(primals_14, (4297, 2000), (2000, 1)) assert_size_stride(primals_15, (4297, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4297, 2000), (1, 4297), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 8000, grid=grid(8000), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2000, 100), (1, 2000), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 400, grid=grid(400), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf5) del primals_9 # Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like] buf6 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf7 = buf6 del buf6 
buf8 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add] triton_poi_fused_add_exp_mul_2.run(buf7, buf5, buf4, buf8, 20, grid=grid(20), stream=stream0) buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (5, 100), (1, 5), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf10, primals_11, 400, grid=grid(400), stream=stream0) del primals_11 buf11 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf10, reinterpret_tensor(primals_12, (100, 2000), (1, 100), 0), out=buf11) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [h4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf12, primals_13, 8000, grid=grid(8000), stream=stream0) del primals_13 buf13 = empty_strided_cuda((4, 4297), (4320, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_14, (2000, 4297), (1, 2000), 0), out=buf13) buf14 = empty_strided_cuda((4, 4297), (4297, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_3.run(buf13, primals_15, buf14, 17188, grid=grid(17188), stream=stream0) del buf13 del primals_15 return (buf14, buf4, buf5, primals_1, buf1, buf3, buf5, buf7, buf8, buf10, buf12, buf14, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4297), (4297, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2000, 4297), (4297, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 2000), (2000, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((5, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((5, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((100, 5), (5, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2000, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4297, 2000), (2000, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4297, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F


class VAE(nn.Module):

    def __init__(self):
        super(VAE, self).__init__()
        self.input_linear = nn.Linear(4297, 2000)
        self.enc_middle = nn.Linear(2000, 100)
        self.enc_1 = nn.Linear(100, 5)
        self.enc_2 = nn.Linear(100, 5)
        self.dec_0 = nn.Linear(5, 100)
        self.dec_middle = nn.Linear(100, 2000)
        self.output_linear = nn.Linear(2000, 4297)

    def encode(self, x):
        h1 = F.relu(self.input_linear(x))
        h2 = F.relu(self.enc_middle(h1))
        return self.enc_1(h2), self.enc_2(h2)

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def decode(self, z):
        h3 = F.relu(self.dec_0(z))
        h4 = F.relu(self.dec_middle(h3))
        return torch.sigmoid(self.output_linear(h4))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 4297))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar


def get_inputs():
    return [torch.rand([4, 4297])]


def get_init_inputs():
    return [[], {}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = xindex // 2000 tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 17188 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4297 x1 = xindex // 4297 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4320 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4297), (4297, 1)) assert_size_stride(primals_2, (2000, 4297), (4297, 1)) assert_size_stride(primals_3, (2000,), (1,)) assert_size_stride(primals_4, (100, 2000), (2000, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (5, 100), (100, 1)) assert_size_stride(primals_7, (5,), (1,)) assert_size_stride(primals_8, (5, 100), (100, 1)) assert_size_stride(primals_9, (5,), (1,)) assert_size_stride(primals_10, (100, 5), (5, 1)) assert_size_stride(primals_11, (100,), (1,)) assert_size_stride(primals_12, (2000, 100), (100, 1)) assert_size_stride(primals_13, (2000,), (1,)) assert_size_stride(primals_14, (4297, 2000), (2000, 1)) assert_size_stride(primals_15, (4297,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4297, 2000), (1, 4297), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(8000)](buf1, primals_3, 8000, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2000, 100), (1, 2000), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (100, 5), (1, 100), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 5), (5, 1), torch.float32) triton_poi_fused_add_exp_mul_2[grid(20)](buf7, buf5, buf4, buf8, 20, XBLOCK=32, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (5, 100), (1, 5), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_1[grid(400)](buf10, primals_11, 400, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 buf11 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_12, (100, 2000), (1, 100), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_0[grid(8000)](buf12, primals_13, 8000, XBLOCK =128, num_warps=4, num_stages=1) del primals_13 buf13 = empty_strided_cuda((4, 4297), (4320, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_14, (2000, 4297 ), (1, 2000), 0), out=buf13) buf14 = empty_strided_cuda((4, 4297), (4297, 1), torch.float32) triton_poi_fused_sigmoid_3[grid(17188)](buf13, primals_15, buf14, 17188, XBLOCK=256, num_warps=4, num_stages=1) del buf13 del primals_15 return (buf14, buf4, buf5, primals_1, buf1, buf3, buf5, buf7, buf8, buf10, buf12, buf14, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4) class VAENew(nn.Module): def __init__(self): super(VAENew, self).__init__() self.input_linear = nn.Linear(4297, 2000) self.enc_middle = nn.Linear(2000, 100) self.enc_1 = nn.Linear(100, 5) self.enc_2 = nn.Linear(100, 5) self.dec_0 = nn.Linear(5, 100) self.dec_middle = nn.Linear(100, 2000) self.output_linear = nn.Linear(2000, 4297) def encode(self, x): h1 = F.relu(self.input_linear(x)) h2 = F.relu(self.enc_middle(h1)) return self.enc_1(h2), self.enc_2(h2) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def decode(self, z): h3 = F.relu(self.dec_0(z)) h4 = F.relu(self.dec_middle(h3)) return torch.sigmoid(self.output_linear(h4)) def forward(self, input_0): primals_2 = self.input_linear.weight primals_3 = self.input_linear.bias primals_4 = self.enc_middle.weight primals_5 = self.enc_middle.bias primals_6 = self.enc_1.weight primals_7 = self.enc_1.bias primals_8 = self.enc_2.weight primals_9 = self.enc_2.bias primals_10 = self.dec_0.weight primals_11 = self.dec_0.bias primals_12 = self.dec_middle.weight primals_13 = 
self.dec_middle.bias primals_14 = self.output_linear.weight primals_15 = self.output_linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0], output[1], output[2]
helenaandres/adversarial-generation-of-gene-expression-data
VAE
false
10234
[ "MIT" ]
0
9a10f0c364b7daa789ae75ab5b51ed5c7cbcbeb1
https://github.com/helenaandres/adversarial-generation-of-gene-expression-data/tree/9a10f0c364b7daa789ae75ab5b51ed5c7cbcbeb1
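A minimal usage sketch for the record above, not part of the dataset row itself: it assumes a CUDA device and that VAE and VAENew are importable from the sources shown. Since reparameterize draws fresh Gaussian noise on every forward, only the deterministic mu/logvar outputs are compared across the eager and compiled modules.

import torch

torch.manual_seed(0)
eager = VAE().cuda().eval()
compiled = VAENew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical parameter names, so weights transfer directly

x = torch.rand(4, 4297, device='cuda')
with torch.no_grad():
    _, mu_e, logvar_e = eager(x)
    _, mu_c, logvar_c = compiled(x)

# mu and logvar depend only on (weights, x); the reconstructions differ
# because each forward samples its own eps.
print(torch.allclose(mu_e, mu_c, atol=1e-5))
print(torch.allclose(logvar_e, logvar_c, atol=1e-5))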
DispAct
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2v/c2vsyafx7g4metctsn5ppjp7y7xkbau5osryuxram5tznbyk6y23.py # Topologically Sorted Source Nodes: [softplus, clamp], Original ATen: [aten.softplus, aten.clamp] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min # softplus => exp, gt, log1p, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%where, 0.0001), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 10000.0), kwargs = {}) triton_poi_fused_clamp_softplus_0 = async_compile.triton('triton_poi_fused_clamp_softplus_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.0001 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 10000.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, clamp], Original ATen: [aten.softplus, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_softplus_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class DispAct(nn.Module):

    def __init__(self):
        super(DispAct, self).__init__()

    def forward(self, x):
        return torch.clamp(F.softplus(x), min=0.0001, max=10000.0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.0001 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 10000.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_softplus_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class DispActNew(nn.Module): def __init__(self): super(DispActNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jdasam/scDCC
DispAct
false
10235
[ "Apache-2.0" ]
0
8ebaed766db5ad56021983ebc13e9a60b6c7b453
https://github.com/jdasam/scDCC/tree/8ebaed766db5ad56021983ebc13e9a60b6c7b453
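A quick parity sketch, assuming a CUDA device and the DispActNew definition above: the fused kernel uses the same 20.0 threshold as the numerically stable linear region of F.softplus, so compiled and eager results should agree on both branches.

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4, device='cuda') * 30  # large values exercise the x > 20 pass-through branch
y = DispActNew()(x)
ref = torch.clamp(F.softplus(x), min=0.0001, max=10000.0)
print(torch.allclose(y, ref, atol=1e-5))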
KLDLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5v/c5vtwldilipwi3r3wx5rsxypbvfmsav7s7xybl5bf72d3sckrknw.py # Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul] # Source node to ATen node mapping: # add => add # exp => exp # mul => mul # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %exp), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {}) triton_per_fused_add_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = -0.5 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class KLDLoss(nn.Module):

    def forward(self, mu, logvar):
        return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = -0.5 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class KLDLossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
izhorvath/MetGAN
KLDLoss
false
10236
[ "BSD-3-Clause" ]
0
aca85fb3306d2515a65c8d525cd78e1147ba7e1b
https://github.com/izhorvath/MetGAN/tree/aca85fb3306d2515a65c8d525cd78e1147ba7e1b
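For reference, the quantity the fused reduction produces is the closed-form KL divergence between a diagonal Gaussian N(mu, sigma^2) and the standard normal. A small eager-mode check, illustrative only, verifies the identity against torch.distributions:

import torch
from torch.distributions import Normal, kl_divergence

mu = torch.rand(4, 4, 4, 4)
logvar = torch.rand(4, 4, 4, 4)

# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), exactly what KLDLoss returns
closed_form = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
ref = kl_divergence(Normal(mu, torch.exp(0.5 * logvar)),
                    Normal(torch.zeros_like(mu), torch.ones_like(mu))).sum()
print(torch.allclose(closed_form, ref, atol=1e-5))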
PerturbationModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2v/c2vwatafc7bysodsvfwld7cmvue26dh5jpkxqo6qevfrerixs7yp.py # Topologically Sorted Source Nodes: [zeros_like], Original ATen: [aten.zeros_like] # Source node to ATen node mapping: # zeros_like => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_like_0 = async_compile.triton('triton_poi_fused_zeros_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_like_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/km/ckmxpzrjcatgg5st7lgobdwygl3sa5s6b4zxsma2yq476435fsk2.py # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # x => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%normal, 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [zeros_like], Original ATen: [aten.zeros_like] stream0 = get_raw_stream(0) triton_poi_fused_zeros_like_0.run(buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [zeros_like, normal], Original ATen: [aten.zeros_like, aten.normal] buf1 = torch.ops.aten.normal.Tensor_float(buf0) del buf0 buf2 = buf1 del buf1 buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_1.run(buf3, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, 
repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class PerturbationModule(nn.Module):

    def __init__(self, T):
        super(PerturbationModule, self).__init__()
        self.T = T
        self.training = False
        self.conv_block = None

    def forward(self, x):
        if not self.training:
            x = x + self.T * torch.normal(torch.zeros_like(x), 1.0)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'T': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_zeros_like_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_like_0[grid(256)](buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = torch.ops.aten.normal.Tensor_float(buf0) del buf0 buf2 = buf1 del buf1 buf3 = buf2 del buf2 triton_poi_fused_add_mul_1[grid(256)](buf3, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class PerturbationModuleNew(nn.Module): def __init__(self, T): super(PerturbationModuleNew, self).__init__() self.T = T self.training = False self.conv_block = None def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jeffkinnison/pytorch-CycleGAN-and-pix2pix
PerturbationModule
false
10237
[ "BSD-3-Clause" ]
0
e47041fa4ffa80ad5948d2d1125ec94c34c5947d
https://github.com/jeffkinnison/pytorch-CycleGAN-and-pix2pix/tree/e47041fa4ffa80ad5948d2d1125ec94c34c5947d
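One detail worth noting: the eager module hard-codes self.training = False, so the noise branch was traced unconditionally, and the scale T=4 from get_init_inputs was baked into triton_poi_fused_add_mul_1 as the literal 4.0; the T stored on the wrapper never reaches the compiled kernel. A minimal sketch of the baked-in behavior x + 4 * N(0, 1), assuming a CUDA device:

import torch

torch.manual_seed(0)
m = PerturbationModuleNew(T=4)
x = torch.zeros(4, 4, 4, 4, device='cuda')
y = m(x)
# With x = 0 the output is pure scaled noise: mean near 0, std near T = 4.
print(y.mean().item(), y.std().item())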
CIoU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/lo/clogtb3i4zusr6ps43mzqxludqyhxzh63ytsulqsxxjswkfqhj34.py # Topologically Sorted Source Nodes: [sub_8, sub_9, rec_1, sub_10, sub_11, rec_2, add_4, gt, gt_1, or_, where, minimum_1, maximum_1, sub_6, X, gt_2, gt_3, or__1, where_1, minimum_2, maximum_2, sub_7, Y, i_area, total_area, IoU, sub_13, sub, pow_1, sub_1, pow_2, add_2, p, pow_5, sub_4, pow_3, sub_5, pow_4, add_3, C, pow_6, truediv_1, DIoU, mean], Original ATen: [aten.sub, aten.mul, aten.add, aten.gt, aten.bitwise_or, aten.scalar_tensor, aten.where, aten.minimum, aten.maximum, aten.div, aten.rsub, aten.pow, aten.sqrt, aten.mean] # Source node to ATen node mapping: # C => sqrt_1 # DIoU => add_5 # IoU => div_2 # X => mul_1 # Y => mul_2 # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # gt => gt # gt_1 => gt_1 # gt_2 => gt_2 # gt_3 => gt_3 # i_area => mul_3 # maximum_1 => maximum_1 # maximum_2 => maximum_2 # mean => mean # minimum_1 => minimum_1 # minimum_2 => minimum_2 # or_ => bitwise_or # or__1 => bitwise_or_1 # p => sqrt # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # pow_4 => pow_4 # pow_5 => pow_5 # pow_6 => pow_6 # rec_1 => mul_4 # rec_2 => mul_5 # sub => sub # sub_1 => sub_1 # sub_10 => sub_10 # sub_11 => sub_11 # sub_13 => sub_13 # sub_4 => sub_4 # sub_5 => sub_5 # sub_6 => sub_6 # sub_7 => sub_7 # sub_8 => sub_8 # sub_9 => sub_9 # total_area => sub_12 # truediv_1 => div_3 # where => full_default, full_default_1, where # where_1 => full_default_2, full_default_3, where_1 # Graph fragment: # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_3, %select_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %sub_9), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_6, %select_4), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_7, %select_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_11), kwargs = {}) # %add_4 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%select_4, %select_2), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%select, %select_6), kwargs = {}) # %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%gt, %gt_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%bitwise_or, %full_default_1, %full_default), kwargs = {}) # %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%select_2, %select_6), kwargs = {}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%select, %select_4), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_1, %maximum_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %sub_6), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%select_5, %select_3), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%select_1, %select_7), kwargs = {}) # %bitwise_or_1 : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%gt_2, %gt_3), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%bitwise_or_1, %full_default_3, %full_default_2), kwargs = {}) # %minimum_2 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%select_3, %select_7), kwargs = {}) # %maximum_2 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%select_1, %select_5), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_2, %maximum_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, %sub_7), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %mul_3), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %sub_12), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_10, %select_8), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sub_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%select_11, %select_9), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt, 2), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_14, %select_12), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_15, %select_13), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt_1, 2), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_5, %pow_6), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_13, %div_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_5,), kwargs = {}) triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0 = async_compile.triton('triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp67 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp81 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp83 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp0 = tl.full([1, 1], 2, tl.int64) tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tmp0 < tmp0 tmp4 = tl.load(in_ptr0 + (tl.broadcast_to((4*r0) + 2, [XBLOCK, RBLOCK])), tmp3, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr1 + (tl.broadcast_to((4*r0) + 2, [XBLOCK, RBLOCK])), tmp3, eviction_policy='evict_last', other=0.0) tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp3, tmp6, tmp7) tmp9 = tmp0 >= tmp0 tmp10 = tl.full([1, 1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr0 + (tl.broadcast_to(2 + (4*r0) + 0, [XBLOCK, RBLOCK])), tmp9, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr1 + (tl.broadcast_to(2 + (4*r0) + 0, [XBLOCK, RBLOCK])), tmp9, eviction_policy='evict_last', other=0.0) tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tl.where(tmp3, tmp8, tmp16) tmp18 = tmp1 >= tmp1 tmp19 = tmp1 < tmp0 tmp20 = tl.load(in_ptr0 + (tl.broadcast_to((4*r0) + 0, [XBLOCK, RBLOCK])), tmp19, eviction_policy='evict_last', other=0.0) tmp21 = tl.load(in_ptr1 + (tl.broadcast_to((4*r0) + 0, [XBLOCK, RBLOCK])), tmp19, eviction_policy='evict_last', other=0.0) tmp22 = triton_helpers.minimum(tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp19, tmp22, tmp23) tmp25 = tmp1 >= tmp0 tmp26 = tmp1 < tmp10 tmp27 = tl.load(in_ptr0 + (tl.broadcast_to(2 + (4*r0) + (-2), [XBLOCK, RBLOCK])), tmp25, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr1 + (tl.broadcast_to(2 + (4*r0) + (-2), [XBLOCK, RBLOCK])), tmp25, eviction_policy='evict_last', other=0.0) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp25, tmp29, tmp30) tmp32 = tl.where(tmp19, tmp24, tmp31) tmp33 = tmp17 - tmp32 tmp34 = tl.full([1, 1], 3, tl.int64) tmp35 = tmp34 >= tmp1 tmp36 = tmp34 < tmp0 tmp37 = tl.load(in_ptr0 + (tl.broadcast_to((4*r0) + 3, [XBLOCK, RBLOCK])), tmp36, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr1 + (tl.broadcast_to((4*r0) + 3, [XBLOCK, RBLOCK])), tmp36, eviction_policy='evict_last', other=0.0) tmp39 = triton_helpers.minimum(tmp37, tmp38) tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp36, tmp39, tmp40) tmp42 = tmp34 >= tmp0 tmp43 = tmp34 < tmp10 tmp44 = tl.load(in_ptr0 + (tl.broadcast_to(2 + (4*r0) + 1, [XBLOCK, RBLOCK])), tmp42, 
eviction_policy='evict_last', other=0.0) tmp45 = tl.load(in_ptr1 + (tl.broadcast_to(2 + (4*r0) + 1, [XBLOCK, RBLOCK])), tmp42, eviction_policy='evict_last', other=0.0) tmp46 = triton_helpers.maximum(tmp44, tmp45) tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp42, tmp46, tmp47) tmp49 = tl.where(tmp36, tmp41, tmp48) tmp50 = tl.full([1, 1], 1, tl.int64) tmp51 = tmp50 >= tmp1 tmp52 = tmp50 < tmp0 tmp53 = tl.load(in_ptr0 + (tl.broadcast_to((4*r0) + 1, [XBLOCK, RBLOCK])), tmp52, eviction_policy='evict_last', other=0.0) tmp54 = tl.load(in_ptr1 + (tl.broadcast_to((4*r0) + 1, [XBLOCK, RBLOCK])), tmp52, eviction_policy='evict_last', other=0.0) tmp55 = triton_helpers.minimum(tmp53, tmp54) tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype) tmp57 = tl.where(tmp52, tmp55, tmp56) tmp58 = tmp50 >= tmp0 tmp59 = tmp50 < tmp10 tmp60 = tl.load(in_ptr0 + (tl.broadcast_to(2 + (4*r0) + (-1), [XBLOCK, RBLOCK])), tmp58, eviction_policy='evict_last', other=0.0) tmp61 = tl.load(in_ptr1 + (tl.broadcast_to(2 + (4*r0) + (-1), [XBLOCK, RBLOCK])), tmp58, eviction_policy='evict_last', other=0.0) tmp62 = triton_helpers.maximum(tmp60, tmp61) tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype) tmp64 = tl.where(tmp58, tmp62, tmp63) tmp65 = tl.where(tmp52, tmp57, tmp64) tmp66 = tmp49 - tmp65 tmp69 = tmp67 > tmp68 tmp72 = tmp70 > tmp71 tmp73 = tmp69 | tmp72 tmp74 = tl.where(tmp73, tmp1, tmp50) tmp75 = tmp74.to(tl.float32) tmp76 = triton_helpers.minimum(tmp68, tmp71) tmp77 = triton_helpers.maximum(tmp70, tmp67) tmp78 = tmp76 - tmp77 tmp79 = tmp75 * tmp78 tmp82 = tmp80 > tmp81 tmp85 = tmp83 > tmp84 tmp86 = tmp82 | tmp85 tmp87 = tl.where(tmp86, tmp1, tmp50) tmp88 = tmp87.to(tl.float32) tmp89 = triton_helpers.minimum(tmp81, tmp84) tmp90 = triton_helpers.maximum(tmp83, tmp80) tmp91 = tmp89 - tmp90 tmp92 = tmp88 * tmp91 tmp93 = tmp79 * tmp92 tmp94 = tmp68 - tmp70 tmp95 = tmp81 - tmp83 tmp96 = tmp94 * tmp95 tmp97 = tmp71 - tmp67 tmp98 = tmp84 - tmp80 tmp99 = tmp97 * tmp98 tmp100 = tmp96 + tmp99 tmp101 = tmp100 - tmp93 tmp102 = tmp67 + tmp71 tmp103 = 0.5 tmp104 = tmp102 * tmp103 tmp105 = tmp70 + tmp68 tmp106 = tmp105 * tmp103 tmp107 = tmp104 - tmp106 tmp108 = tmp107 * tmp107 tmp109 = tmp80 + tmp84 tmp110 = tmp109 * tmp103 tmp111 = tmp83 + tmp81 tmp112 = tmp111 * tmp103 tmp113 = tmp110 - tmp112 tmp114 = tmp113 * tmp113 tmp115 = tmp108 + tmp114 tmp116 = libdevice.sqrt(tmp115) tmp117 = tmp116 * tmp116 tmp118 = tmp33 * tmp33 tmp119 = tmp66 * tmp66 tmp120 = tmp118 + tmp119 tmp121 = libdevice.sqrt(tmp120) tmp122 = tmp121 * tmp121 tmp123 = tmp117 / tmp122 tmp124 = tmp93 / tmp101 tmp125 = 1.0 tmp126 = tmp125 - tmp124 tmp127 = tmp126 + tmp123 tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK]) tmp130 = tl.sum(tmp128, 1)[:, None] tmp131 = 4.0 tmp132 = tmp130 / tmp131 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp132, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [sub_8, sub_9, rec_1, sub_10, sub_11, rec_2, add_4, gt, gt_1, or_, where, minimum_1, maximum_1, sub_6, X, gt_2, gt_3, or__1, where_1, minimum_2, maximum_2, sub_7, Y, i_area, total_area, IoU, sub_13, sub, pow_1, sub_1, pow_2, add_2, p, pow_5, sub_4, pow_3, sub_5, pow_4, add_3, C, pow_6, truediv_1, DIoU, 
mean], Original ATen: [aten.sub, aten.mul, aten.add, aten.gt, aten.bitwise_or, aten.scalar_tensor, aten.where, aten.minimum, aten.maximum, aten.div, aten.rsub, aten.pow, aten.sqrt, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0.run(buf6, arg1_1, arg0_1, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class CIoU(nn.Module):

    def __init__(self):
        super(CIoU, self).__init__()

    def forward(self, inputs, targets):
        size = len(inputs)
        uL_truth = targets[:, 0:2]
        lR_truth = targets[:, 2:4]
        uL_pred = inputs[:, 0:2]
        lR_pred = inputs[:, 2:4]
        truth_cen = torch.div(torch.add(uL_truth, lR_truth), 2)
        pred_cen = torch.div(torch.add(uL_pred, lR_pred), 2)
        uL_truth_x = uL_truth[:, 0]
        uL_truth_y = uL_truth[:, 1]
        lR_truth_x = lR_truth[:, 0]
        lR_truth_y = lR_truth[:, 1]
        uL_pred_x = uL_pred[:, 0]
        uL_pred_y = uL_pred[:, 1]
        lR_pred_x = lR_pred[:, 0]
        lR_pred_y = lR_pred[:, 1]
        truth_cen_x = truth_cen[:, 0]
        truth_cen_y = truth_cen[:, 1]
        pred_cen_x = pred_cen[:, 0]
        pred_cen_y = pred_cen[:, 1]
        p = torch.sqrt((pred_cen_x - truth_cen_x) ** 2 +
                       (pred_cen_y - truth_cen_y) ** 2)
        enc_X = torch.reshape(torch.minimum(uL_truth, uL_pred), (size, 2))
        enc_Y = torch.reshape(torch.maximum(lR_truth, lR_pred), (size, 2))
        bounding_box = torch.reshape(torch.cat((enc_X, enc_Y), 1), (size, 4))
        bb_uL_x = bounding_box[:, 0]
        bb_uL_y = bounding_box[:, 1]
        bb_lR_x = bounding_box[:, 2]
        bb_lR_y = bounding_box[:, 3]
        (bb_lR_x - bb_uL_x) * (bb_lR_y - bb_uL_y)
        C = torch.sqrt((bb_lR_x - bb_uL_x) ** 2 + (bb_lR_y - bb_uL_y) ** 2)
        X = torch.where(torch.gt(uL_pred_x, lR_truth_x) | torch.gt(
            uL_truth_x, lR_pred_x), 0, 1) * (torch.minimum(lR_truth_x,
            lR_pred_x) - torch.maximum(uL_truth_x, uL_pred_x))
        Y = torch.where(torch.gt(uL_pred_y, lR_truth_y) | torch.gt(
            uL_truth_y, lR_pred_y), 0, 1) * (torch.minimum(lR_truth_y,
            lR_pred_y) - torch.maximum(uL_truth_y, uL_pred_y))
        i_area = X * Y
        rec_1 = (lR_truth_x - uL_truth_x) * (lR_truth_y - uL_truth_y)
        rec_2 = (lR_pred_x - uL_pred_x) * (lR_pred_y - uL_pred_y)
        total_area = rec_1 + rec_2 - i_area
        IoU = i_area / total_area
        DIoU = 1 - IoU + p ** 2 / C ** 2
        return torch.mean(DIoU)
        """#my own calculation
        first = 1 - (i_area/BBA) # this will approach 0 when i_area == bbA
        second = torch.where(p<5, first, torch.mul(p, first))
        return torch.mean(second)"""
        """pred_W = lR_pred_x - uL_pred_x
        pred_H = torch.where((lR_pred_y - uL_pred_y)!=0, 1, 0)*(lR_pred_y - uL_pred_y)
        truth_W = lR_truth_x - uL_truth_x
        truth_H = torch.where((lR_truth_y - uL_truth_y)!=0, 1, 0)*(lR_truth_y - uL_truth_y)
        V = (4/(np.pi**2))*((torch.atan(torch.div(truth_W, truth_H)) - torch.atan(torch.div(pred_W, pred_H)))**2)
        #alpha_1 = torch.div(V, ((1 - IoU)+V))
        #print("alpha 1: ", alpha_1)
        alpha = torch.where(torch.lt(IoU, 0.5), 0, 1)*torch.div(V, ((1-IoU)+V))
        #print(torch.where(torch.lt(IoU, 0.5), 0, 1))
        CIoU = 1 - (IoU - (((p**2)/(C**2)) + alpha*V))
        return CIoU"""


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
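Despite the class name, the value actually returned is the DIoU loss, 1 - IoU + p**2 / C**2; the commented-out blocks hold an abandoned CIoU variant. A small eager-mode sanity check, illustrative only: a box compared with itself has IoU = 1 and zero center distance, so the loss is 0, while a shifted prediction raises it.

import torch

boxes = torch.tensor([[0.0, 0.0, 2.0, 2.0]] * 4)  # rows of (x1, y1, x2, y2)
shifted = boxes + 1.0  # same size, center moved by (1, 1)

loss = CIoU()
print(loss(boxes, boxes).item())    # 0.0: perfect overlap, coincident centers
print(loss(shifted, boxes).item())  # > 0: IoU drops to 1/7 and p**2 / C**2 > 0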
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0(
        in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp67 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp68 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp70 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp80 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp81 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp83 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp84 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp0 = tl.full([1, 1], 2, tl.int64)
    tmp1 = tl.full([1, 1], 0, tl.int64)
    tmp3 = tmp0 < tmp0
    tmp4 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 2, [XBLOCK, RBLOCK]),
        tmp3, eviction_policy='evict_last', other=0.0)
    tmp5 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 2, [XBLOCK, RBLOCK]),
        tmp3, eviction_policy='evict_last', other=0.0)
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
    tmp8 = tl.where(tmp3, tmp6, tmp7)
    tmp9 = tmp0 >= tmp0
    tl.full([1, 1], 4, tl.int64)
    tmp12 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + 0, [XBLOCK,
        RBLOCK]), tmp9, eviction_policy='evict_last', other=0.0)
    tmp13 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + 0, [XBLOCK,
        RBLOCK]), tmp9, eviction_policy='evict_last', other=0.0)
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
    tmp16 = tl.where(tmp9, tmp14, tmp15)
    tmp17 = tl.where(tmp3, tmp8, tmp16)
    tmp19 = tmp1 < tmp0
    tmp20 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 0, [XBLOCK, RBLOCK]),
        tmp19, eviction_policy='evict_last', other=0.0)
    tmp21 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 0, [XBLOCK, RBLOCK]),
        tmp19, eviction_policy='evict_last', other=0.0)
    tmp22 = triton_helpers.minimum(tmp20, tmp21)
    tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
    tmp24 = tl.where(tmp19, tmp22, tmp23)
    tmp25 = tmp1 >= tmp0
    tmp27 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + -2, [XBLOCK,
        RBLOCK]), tmp25, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + -2, [XBLOCK,
        RBLOCK]), tmp25, eviction_policy='evict_last', other=0.0)
    tmp29 = triton_helpers.maximum(tmp27, tmp28)
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp25, tmp29, tmp30)
    tmp32 = tl.where(tmp19, tmp24, tmp31)
    tmp33 = tmp17 - tmp32
    tmp34 = tl.full([1, 1], 3, tl.int64)
    tmp36 = tmp34 < tmp0
    tmp37 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 3, [XBLOCK, RBLOCK]),
        tmp36, eviction_policy='evict_last', other=0.0)
    tmp38 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 3, [XBLOCK, RBLOCK]),
        tmp36, eviction_policy='evict_last', other=0.0)
    tmp39 = triton_helpers.minimum(tmp37, tmp38)
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp36, tmp39, tmp40)
    tmp42 = tmp34 >= tmp0
    tmp44 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + 1, [XBLOCK,
        RBLOCK]), tmp42, eviction_policy='evict_last', other=0.0)
    tmp45 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + 1, [XBLOCK,
        RBLOCK]), tmp42, eviction_policy='evict_last', other=0.0)
    tmp46 = triton_helpers.maximum(tmp44, tmp45)
    tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
    tmp48 = tl.where(tmp42, tmp46, tmp47)
    tmp49 = tl.where(tmp36, tmp41, tmp48)
    tmp50 = tl.full([1, 1], 1, tl.int64)
    tmp52 = tmp50 < tmp0
    tmp53 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 1, [XBLOCK, RBLOCK]),
        tmp52, eviction_policy='evict_last', other=0.0)
    tmp54 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 1, [XBLOCK, RBLOCK]),
        tmp52, eviction_policy='evict_last', other=0.0)
    tmp55 = triton_helpers.minimum(tmp53, tmp54)
    tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
    tmp57 = tl.where(tmp52, tmp55, tmp56)
    tmp58 = tmp50 >= tmp0
    tmp60 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + -1, [XBLOCK,
        RBLOCK]), tmp58, eviction_policy='evict_last', other=0.0)
    tmp61 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + -1, [XBLOCK,
        RBLOCK]), tmp58, eviction_policy='evict_last', other=0.0)
    tmp62 = triton_helpers.maximum(tmp60, tmp61)
    tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype)
    tmp64 = tl.where(tmp58, tmp62, tmp63)
    tmp65 = tl.where(tmp52, tmp57, tmp64)
    tmp66 = tmp49 - tmp65
    tmp69 = tmp67 > tmp68
    tmp72 = tmp70 > tmp71
    tmp73 = tmp69 | tmp72
    tmp74 = tl.where(tmp73, tmp1, tmp50)
    tmp75 = tmp74.to(tl.float32)
    tmp76 = triton_helpers.minimum(tmp68, tmp71)
    tmp77 = triton_helpers.maximum(tmp70, tmp67)
    tmp78 = tmp76 - tmp77
    tmp79 = tmp75 * tmp78
    tmp82 = tmp80 > tmp81
    tmp85 = tmp83 > tmp84
    tmp86 = tmp82 | tmp85
    tmp87 = tl.where(tmp86, tmp1, tmp50)
    tmp88 = tmp87.to(tl.float32)
    tmp89 = triton_helpers.minimum(tmp81, tmp84)
    tmp90 = triton_helpers.maximum(tmp83, tmp80)
    tmp91 = tmp89 - tmp90
    tmp92 = tmp88 * tmp91
    tmp93 = tmp79 * tmp92
    tmp94 = tmp68 - tmp70
    tmp95 = tmp81 - tmp83
    tmp96 = tmp94 * tmp95
    tmp97 = tmp71 - tmp67
    tmp98 = tmp84 - tmp80
    tmp99 = tmp97 * tmp98
    tmp100 = tmp96 + tmp99
    tmp101 = tmp100 - tmp93
    tmp102 = tmp67 + tmp71
    tmp103 = 0.5
    tmp104 = tmp102 * tmp103
    tmp105 = tmp70 + tmp68
    tmp106 = tmp105 * tmp103
    tmp107 = tmp104 - tmp106
    tmp108 = tmp107 * tmp107
    tmp109 = tmp80 + tmp84
    tmp110 = tmp109 * tmp103
    tmp111 = tmp83 + tmp81
    tmp112 = tmp111 * tmp103
    tmp113 = tmp110 - tmp112
    tmp114 = tmp113 * tmp113
    tmp115 = tmp108 + tmp114
    tmp116 = libdevice.sqrt(tmp115)
    tmp117 = tmp116 * tmp116
    tmp118 = tmp33 * tmp33
    tmp119 = tmp66 * tmp66
    tmp120 = tmp118 + tmp119
    tmp121 = libdevice.sqrt(tmp120)
    tmp122 = tmp121 * tmp121
    tmp123 = tmp117 / tmp122
    tmp124 = tmp93 / tmp101
    tmp125 = 1.0
    tmp126 = tmp125 - tmp124
    tmp127 = tmp126 + tmp123
    tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK])
    tmp130 = tl.sum(tmp128, 1)[:, None]
    tmp131 = 4.0
    tmp132 = tmp130 / tmp131
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp132, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf5 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf5
        del buf5
        get_raw_stream(0)
        triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0[
            grid(1)](buf6, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
    return buf6,


class CIoUNew(nn.Module):

    def __init__(self):
        super(CIoUNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
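# Parity sketch (added, not part of the record): with a CUDA device available
# the fused kernel should reproduce the eager module up to float tolerance,
# since `call` pins everything to cuda:0.
import torch

inputs, targets = (t.cuda() for t in get_inputs())
print(torch.allclose(CIoU()(inputs, targets), CIoUNew()(inputs, targets),
                     atol=1e-6))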
jcscheufele/CS545_Final
CIoU
false
10,238
[ "MIT" ]
0
d86858408a9a0aab82b5d2b7e12847023d939e2e
https://github.com/jcscheufele/CS545_Final/tree/d86858408a9a0aab82b5d2b7e12847023d939e2e
BiaffineScorer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/am/cambydvhijs7vhod3hwt5aje7cqigviqetkgm6iccyrgbwhmprcb.py
# Topologically Sorted Source Nodes: [input1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   input1 => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], 3), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = (xindex // 5)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 5, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = 1.0
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp6, tmp9, tmp10)
    tmp12 = tl.where(tmp4, tmp5, tmp11)
    tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   bilinear => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_4), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        # Topologically Sorted Source Nodes: [input1], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(primals_1, buf0, 320, grid=grid(320), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        # Topologically Sorted Source Nodes: [input2], Original ATen: [aten.cat]
        triton_poi_fused_cat_0.run(primals_2, buf1, 320, grid=grid(320), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
        buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5), (5, 1), 0), [1, 3], [0], [1, 2], [2, 3])
        del primals_3
        buf3 = buf2
        del buf2
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.add]
        triton_poi_fused_add_1.run(buf4, primals_4, 256, grid=grid(256), stream=stream0)
        del primals_4
    return (buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0), reinterpret_tensor(buf1, (64, 5), (5, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BiaffineScorer(nn.Module):

    def __init__(self, input1_size, input2_size, output_size):
        super().__init__()
        # The +1 on each input size leaves room for a constant 1 feature,
        # which turns the plain bilinear form into a full biaffine scorer
        # (bilinear + linear + bias terms in one weight tensor).
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
            output_size)
        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_()

    def forward(self, input1, input2):
        # Append a ones column along the last dimension of both inputs.
        input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
            ], len(input1.size()) - 1)
        input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
            ], len(input2.size()) - 1)
        return self.W_bilin(input1, input2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input1_size': 4, 'input2_size': 4, 'output_size': 4}]
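# Usage sketch (added; the shapes here are illustrative, not from the record).
# Because weight and bias start zeroed, the initial scores are exactly zero;
# the ones column guarantees the layer can still learn affine terms.
import torch

scorer = BiaffineScorer(input1_size=4, input2_size=4, output_size=4)
h1 = torch.rand(2, 7, 4)   # e.g. (batch, tokens, features)
h2 = torch.rand(2, 7, 4)
scores = scorer(h1, h2)
print(scores.shape)        # torch.Size([2, 7, 4])
print(scores.abs().max())  # tensor(0.) -- zero-initialised parameters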
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp9 = 1.0
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp6, tmp9, tmp10)
    tmp12 = tl.where(tmp4, tmp5, tmp11)
    tl.store(out_ptr0 + x2, tmp12, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        triton_poi_fused_cat_0[grid(320)](primals_2, buf1, 320, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0,
            (64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64,
            5), (5, 1), 0), [1, 3], [0], [1, 2], [2, 3])
        del primals_3
        buf3 = buf2
        del buf2
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        triton_poi_fused_add_1[grid(256)](buf4, primals_4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_4
    return buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0
        ), reinterpret_tensor(buf1, (64, 5), (5, 1), 0)


class BiaffineScorerNew(nn.Module):

    def __init__(self, input1_size, input2_size, output_size):
        super().__init__()
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
            output_size)
        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_()

    def forward(self, input_0, input_1):
        primals_3 = self.W_bilin.weight
        primals_4 = self.W_bilin.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
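# Parity sketch (added, not part of the record): randomise the shared weights
# first, since both modules initialise them to zero and a zero/zero comparison
# would be vacuous. Requires a CUDA device.
import torch

torch.manual_seed(0)
eager = BiaffineScorer(4, 4, 4).cuda()
fused = BiaffineScorerNew(4, 4, 4).cuda()
with torch.no_grad():
    for p, q in zip(eager.parameters(), fused.parameters()):
        p.normal_()
        q.copy_(p)
x1, x2 = (t.cuda() for t in get_inputs())
print(torch.allclose(eager(x1, x2), fused(x1, x2), atol=1e-5))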
giorgianb/stanza
BiaffineScorer
false
10,239
[ "Apache-2.0" ]
0
e1ff1ab73c228739fea3ef5c012a9f1042bef2e3
https://github.com/giorgianb/stanza/tree/e1ff1ab73c228739fea3ef5c012a9f1042bef2e3
Actor
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [act], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
#   act => tanh
# Graph fragment:
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256), (256, 1))
    assert_size_stride(primals_5, (256, ), (1, ))
    assert_size_stride(primals_6, (4, 256), (256, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2  # reuse
        buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 16384, grid=grid(16384), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [act], Original ATen: [aten.tanh]
        triton_poi_fused_tanh_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
        del primals_7
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
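# Reading aid (added): triton_poi_fused_relu_threshold_backward_0 above fuses
# the bias add and ReLU with saving the `output <= 0` mask that the generated
# backward pass of aten.threshold reuses. In eager terms the kernel computes
# roughly the following (a sketch, not the generated code itself):
import torch

x = torch.randn(8, 256)
bias = torch.randn(256)
out = torch.relu(x + bias)     # forward activation, stored in place
mask = out <= 0                # bool buffer kept for the ReLU backward
print(out.shape, mask.dtype)   # torch.Size([8, 256]) torch.bool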
import torch
import torch.nn.functional as F
from torch import nn


class Actor(nn.Module):
    """ Policy Network (state --> action) """

    def __init__(self, state_size: 'int', action_size: 'int',
            hidden_size: 'int'=256):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # tanh bounds every action component to [-1, 1].
        act = torch.tanh(self.out(x))
        return act


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4}]
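# Usage sketch (added): the helpers above feed a (4, 4, 4, 4) tensor through
# the net, but any batch of state vectors works, since nn.Linear acts on the
# last dimension.
import torch

actor = Actor(state_size=4, action_size=4)
states = torch.rand(32, 4)
actions = actor(states)
print(actions.shape)  # torch.Size([32, 4])
print(actions.min().item() >= -1.0, actions.max().item() <= 1.0)  # True True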
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256), (256, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (4, 256), (256, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
            primals_2, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
            primals_5, buf6, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
    return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
        ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
        ), buf5, primals_6, buf6, primals_4, buf7


class ActorNew(nn.Module):
    """ Policy Network (state --> action) """

    def __init__(self, state_size: 'int', action_size: 'int',
            hidden_size: 'int'=256):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, action_size)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.out.weight
        primals_7 = self.out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
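# Parity sketch (added, not part of the record): share the weights, then
# compare the eager and compiled forward passes on the record's sample input.
import torch

torch.manual_seed(0)
eager = Actor(4, 4).cuda()
fused = ActorNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())
x = get_inputs()[0].cuda()
print(torch.allclose(eager(x), fused(x), atol=1e-5))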
jadenvc/puppersim
Actor
false
10,240
[ "Apache-2.0" ]
0
1b3f3e3fc0515d5d6101622e0d729c779debfd32
https://github.com/jadenvc/puppersim/tree/1b3f3e3fc0515d5d6101622e0d729c779debfd32
BboxHead
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 2048
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = (yindex // 512)
    tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/ff/cffqfrlow7duxa2a27d743tueqjmutkqtb36724h4w62beoddrug.py
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
#   out_1 => clone
#   view => view
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
#   %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 4]), kwargs = {})
triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x4 = xindex
    x0 = xindex % 8
    tmp0 = tl.load(in_out_ptr0 + (x4), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (8, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_2, (8, ), (1, ))
    assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        stream0 = get_raw_stream(0)
        triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
        del primals_3
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 8, 64, 64), (32768, 1, 512, 8))
        buf2 = reinterpret_tensor(buf1, (4, 64, 64, 8), (32768, 512, 8, 1), 0); del buf1  # reuse
        buf3 = reinterpret_tensor(buf2, (4, 8192, 4), (32768, 4, 1), 0); del buf2  # reuse
        # Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
        triton_poi_fused_clone_view_1.run(buf3, primals_2, 131072, grid=grid(131072), stream=stream0)
        del primals_2
    return (buf3, primals_1, buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((8, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from itertools import product as product


class BboxHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=2):
        super(BboxHead, self).__init__()
        # A 1x1 conv maps the feature map to 4 box offsets per anchor.
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
            1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        # NCHW -> NHWC, then flatten to (batch, H*W*num_anchors, 4).
        out = out.permute(0, 2, 3, 1).contiguous()
        return out.view(out.shape[0], -1, 4)


def get_inputs():
    return [torch.rand([4, 512, 64, 64])]


def get_init_inputs():
    return [[], {}]
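# Usage sketch (added): with the record's 64x64 input, 64 * 64 spatial
# positions times 2 anchors gives 8192 boxes, each carrying 4 regression
# values.
import torch

head = BboxHead(inchannels=512, num_anchors=2)
feat = torch.rand(4, 512, 64, 64)
print(head(feat).shape)  # torch.Size([4, 8192, 4])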
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)


@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x4 = xindex
    x0 = xindex % 8
    tmp0 = tl.load(in_out_ptr0 + x4, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x4, tmp2, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (8, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768,
            512), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_3
        buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 8, 64, 64), (32768, 1, 512, 8))
        buf2 = reinterpret_tensor(buf1, (4, 64, 64, 8), (32768, 512, 8, 1), 0)
        del buf1
        buf3 = reinterpret_tensor(buf2, (4, 8192, 4), (32768, 4, 1), 0)
        del buf2
        triton_poi_fused_clone_view_1[grid(131072)](buf3, primals_2,
            131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
    return buf3, primals_1, buf0


class BboxHeadNew(nn.Module):

    def __init__(self, inchannels=512, num_anchors=2):
        super(BboxHeadNew, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
            1, 1), stride=1, padding=0)

    def forward(self, input_0):
        primals_1 = self.conv1x1.weight
        primals_2 = self.conv1x1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
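# Parity sketch (added, not part of the record): the compiled head first
# repacks the input to channels-last, runs the 1x1 conv, then fuses the bias
# add with the NHWC clone; the result should match the eager module.
import torch

torch.manual_seed(0)
eager = BboxHead().cuda()
fused = BboxHeadNew().cuda()
fused.load_state_dict(eager.state_dict())
x = get_inputs()[0].cuda()
print(torch.allclose(eager(x), fused(x), atol=1e-5))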
huigs/retinaface-pytorch
BboxHead
false
10,241
[ "MIT" ]
0
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
SDFNetwork
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/fl/cflw6zjzdk2wqtau7m6nsei5vavjfijzxhb37zaa3xp4yxpw5yb2.py
# Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   inputs => mul
# Graph fragment:
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/37/c37esw52rucibb46dl26rfvuzbcbxbhcpsd7ramumzunyzagvgwq.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
#   _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
#   %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = libdevice.sqrt(tmp10)
    tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/jg/cjg527q7k5sloxuipk76c6qbvftmhbkafocncizwfc4enj3gepuu.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
#   _weight_norm => div, mul_1
# Graph fragment:
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
#   %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_2 = async_compile.triton('triton_poi_fused__weight_norm_interface_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 / tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/kh/ckh5bbutpcj4fo2i3xv3gyn55zm6aawjpvu6b4jm6f7qxua5brtu.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
#   x_1 => div_1, exp, gt, log1p, mul_2, where
# Graph fragment:
#   %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, 100), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_2,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 100), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_2, 20.0), kwargs = {})
#   %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %addmm, %div_1), kwargs = {})
triton_poi_fused_softplus_3 = async_compile.triton('triton_poi_fused_softplus_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 100.0
    tmp2 = tmp0 * tmp1
    tmp3 = 20.0
    tmp4 = tmp2 > tmp3
    tmp5 = tl_math.exp(tmp2)
    tmp6 = libdevice.log1p(tmp5)
    tmp7 = 0.01
    tmp8 = tmp6 * tmp7
    tmp9 = tl.where(tmp4, tmp0, tmp8)
    tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/wn/cwnbxkjldwk4wfaj4njkiy2y3vwd2eqanrvyckw46ywaj2vii2co.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div_3, %slice_4], -1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 4, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr0 + (1 + (4*x1) + ((-1) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 1), (1, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    assert_size_stride(primals_5, (4, 1), (1, 1))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
        triton_poi_fused__weight_norm_interface_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
        triton_poi_fused__weight_norm_interface_2.run(primals_3, primals_2, buf1, buf2, 16, grid=grid(16), stream=stream0)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_4
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.softplus]
        triton_poi_fused_softplus_3.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
        triton_poi_fused__weight_norm_interface_1.run(primals_6, buf5, 4, grid=grid(4), stream=stream0)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
        triton_poi_fused__weight_norm_interface_2.run(primals_6, primals_5, buf5, buf6, 16, grid=grid(16), stream=stream0)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7)
        del primals_7
        buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        triton_poi_fused_cat_4.run(buf7, buf8, 16, grid=grid(16), stream=stream0)
        del buf7
    return (buf8, buf2, buf6, primals_2, primals_3, primals_5, primals_6, buf0, buf1, buf3, buf4, buf5, buf6, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn def get_embedder(multires, input_dims=3): embed_kwargs = {'include_input': True, 'input_dims': input_dims, 'max_freq_log2': multires - 1, 'num_freqs': multires, 'log_sampling': True, 'periodic_fns': [torch.sin, torch.cos]} embedder_obj = Embedder(**embed_kwargs) def embed(x, eo=embedder_obj): return eo.embed(x) return embed, embedder_obj.out_dim class Embedder: def __init__(self, **kwargs): self.kwargs = kwargs self.create_embedding_fn() def create_embedding_fn(self): embed_fns = [] d = self.kwargs['input_dims'] out_dim = 0 if self.kwargs['include_input']: embed_fns.append(lambda x: x) out_dim += d max_freq = self.kwargs['max_freq_log2'] N_freqs = self.kwargs['num_freqs'] if self.kwargs['log_sampling']: freq_bands = 2.0 ** torch.linspace(0.0, max_freq, N_freqs) else: freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** max_freq, N_freqs) for freq in freq_bands: for p_fn in self.kwargs['periodic_fns']: embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq) ) out_dim += d self.embed_fns = embed_fns self.out_dim = out_dim def embed(self, inputs): return torch.cat([fn(inputs) for fn in self.embed_fns], -1) class SDFNetwork(nn.Module): def __init__(self, d_in, d_out, d_hidden, n_layers, skip_in=(4,), multires=0, bias=0.5, scale=1, geometric_init=True, weight_norm= True, inside_outside=False): super(SDFNetwork, self).__init__() dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out] self.embed_fn_fine = None if multires > 0: embed_fn, input_ch = get_embedder(multires, input_dims=d_in) self.embed_fn_fine = embed_fn dims[0] = input_ch self.num_layers = len(dims) self.skip_in = skip_in self.scale = scale for l in range(0, self.num_layers - 1): if l + 1 in self.skip_in: out_dim = dims[l + 1] - dims[0] else: out_dim = dims[l + 1] lin = nn.Linear(dims[l], out_dim) if geometric_init: if l == self.num_layers - 2: if not inside_outside: torch.nn.init.normal_(lin.weight, mean=np.sqrt(np. pi) / np.sqrt(dims[l]), std=0.0001) torch.nn.init.constant_(lin.bias, -bias) else: torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np. pi) / np.sqrt(dims[l]), std=0.0001) torch.nn.init.constant_(lin.bias, bias) elif multires > 0 and l == 0: torch.nn.init.constant_(lin.bias, 0.0) torch.nn.init.constant_(lin.weight[:, 3:], 0.0) torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2 ) / np.sqrt(out_dim)) elif multires > 0 and l in self.skip_in: torch.nn.init.constant_(lin.bias, 0.0) torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np. sqrt(out_dim)) torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0 ) else: torch.nn.init.constant_(lin.bias, 0.0) torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np. 
sqrt(out_dim)) if weight_norm: lin = nn.utils.weight_norm(lin) setattr(self, 'lin' + str(l), lin) self.activation = nn.Softplus(beta=100) def forward(self, inputs): inputs = inputs * self.scale if self.embed_fn_fine is not None: inputs = self.embed_fn_fine(inputs) x = inputs for l in range(0, self.num_layers - 1): lin = getattr(self, 'lin' + str(l)) if l in self.skip_in: x = torch.cat([x, inputs], 1) / np.sqrt(2) x = lin(x) if l < self.num_layers - 2: x = self.activation(x) return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1) def sdf(self, x): return self.forward(x)[:, :1] def sdf_hidden_appearance(self, x): return self.forward(x) def gradient(self, x): x.requires_grad_(True) y = self.sdf(x) d_output = torch.ones_like(y, requires_grad=False, device=y.device) gradients = torch.autograd.grad(outputs=y, inputs=x, grad_outputs= d_output, create_graph=True, retain_graph=True, only_inputs=True)[0 ] return gradients.unsqueeze(1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_out': 4, 'd_hidden': 4, 'n_layers': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_softplus_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 100.0 tmp2 = tmp0 * tmp1 tmp3 = 20.0 tmp4 = tmp2 > tmp3 tmp5 = tl_math.exp(tmp2) tmp6 = libdevice.log1p(tmp5) tmp7 = 0.01 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp4, tmp0, tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp13 = tl.load(in_ptr0 + (1 + 4 * x1 + (-1 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 
1), (1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 1), (1, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__weight_norm_interface_2[grid(16)](primals_3, primals_2, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_4 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_softplus_3[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(4)](primals_6, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__weight_norm_interface_2[grid(16)](primals_6, primals_5, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_7 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_4[grid(16)](buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf7 return (buf8, buf2, buf6, primals_2, primals_3, primals_5, primals_6, buf0, buf1, buf3, buf4, buf5, buf6) def get_embedder(multires, input_dims=3): embed_kwargs = {'include_input': True, 'input_dims': input_dims, 'max_freq_log2': multires - 1, 'num_freqs': multires, 'log_sampling': True, 'periodic_fns': [torch.sin, torch.cos]} embedder_obj = Embedder(**embed_kwargs) def embed(x, eo=embedder_obj): return eo.embed(x) return embed, embedder_obj.out_dim class Embedder: def __init__(self, **kwargs): self.kwargs = kwargs self.create_embedding_fn() def create_embedding_fn(self): embed_fns = [] d = self.kwargs['input_dims'] out_dim = 0 if self.kwargs['include_input']: embed_fns.append(lambda x: x) out_dim += d max_freq = self.kwargs['max_freq_log2'] N_freqs = self.kwargs['num_freqs'] if self.kwargs['log_sampling']: freq_bands = 2.0 ** torch.linspace(0.0, max_freq, N_freqs) else: freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** max_freq, N_freqs) for freq in freq_bands: for p_fn in self.kwargs['periodic_fns']: embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq) ) out_dim += d self.embed_fns = embed_fns self.out_dim = out_dim def embed(self, inputs): return torch.cat([fn(inputs) for fn in self.embed_fns], -1) class SDFNetworkNew(nn.Module): def __init__(self, d_in, d_out, d_hidden, n_layers, skip_in=(4,), multires=0, bias=0.5, scale=1, geometric_init=True, weight_norm= True, inside_outside=False): super(SDFNetworkNew, self).__init__() dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out] self.embed_fn_fine = None if multires > 0: embed_fn, input_ch = get_embedder(multires, input_dims=d_in) self.embed_fn_fine = embed_fn dims[0] = input_ch self.num_layers = len(dims) self.skip_in = 
skip_in self.scale = scale for l in range(0, self.num_layers - 1): if l + 1 in self.skip_in: out_dim = dims[l + 1] - dims[0] else: out_dim = dims[l + 1] lin = nn.Linear(dims[l], out_dim) if geometric_init: if l == self.num_layers - 2: if not inside_outside: torch.nn.init.normal_(lin.weight, mean=np.sqrt(np. pi) / np.sqrt(dims[l]), std=0.0001) torch.nn.init.constant_(lin.bias, -bias) else: torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np. pi) / np.sqrt(dims[l]), std=0.0001) torch.nn.init.constant_(lin.bias, bias) elif multires > 0 and l == 0: torch.nn.init.constant_(lin.bias, 0.0) torch.nn.init.constant_(lin.weight[:, 3:], 0.0) torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2 ) / np.sqrt(out_dim)) elif multires > 0 and l in self.skip_in: torch.nn.init.constant_(lin.bias, 0.0) torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np. sqrt(out_dim)) torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0 ) else: torch.nn.init.constant_(lin.bias, 0.0) torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np. sqrt(out_dim)) if weight_norm: lin = nn.utils.weight_norm(lin) setattr(self, 'lin' + str(l), lin) self.activation = nn.Softplus(beta=100) def sdf(self, x): return self.forward(x)[:, :1] def sdf_hidden_appearance(self, x): return self.forward(x) def gradient(self, x): x.requires_grad_(True) y = self.sdf(x) d_output = torch.ones_like(y, requires_grad=False, device=y.device) gradients = torch.autograd.grad(outputs=y, inputs=x, grad_outputs= d_output, create_graph=True, retain_graph=True, only_inputs=True)[0 ] return gradients.unsqueeze(1) def forward(self, input_0): primals_2 = self.lin0.weight_g primals_3 = self.lin0.weight_v primals_4 = self.lin0.bias primals_5 = self.lin1.weight_g primals_6 = self.lin1.weight_v primals_7 = self.lin1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
hzwangjl/NeuS
SDFNetwork
false
10,242
[ "MIT" ]
0
f1b89176ec18e19b3848d787416dab9a1ce5300b
https://github.com/hzwangjl/NeuS/tree/f1b89176ec18e19b3848d787416dab9a1ce5300b
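The SDF record's activation kernel (triton_poi_fused_softplus_3, with the constants 100.0, 20.0 and 0.01) implements nn.Softplus(beta=100) under PyTorch's default threshold of 20: where beta * x exceeds the threshold the input passes through linearly, otherwise log1p(exp(beta * x)) / beta is evaluated. A minimal eager sketch of that identity, on fresh tensors and independent of the record:

import torch
import torch.nn.functional as F

# Branchy softplus exactly as the Triton kernel writes it: the 100.0,
# 20.0 and 0.01 constants are beta, threshold and 1 / beta.
beta, threshold = 100.0, 20.0
x = torch.linspace(-1.0, 1.0, steps=16)
scaled = beta * x
stable = torch.where(scaled > threshold, x,
                     torch.log1p(torch.exp(scaled)) / beta)
# Matches the reference activation used by SDFNetwork above.
torch.testing.assert_close(stable, F.softplus(x, beta=beta, threshold=threshold))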
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y2/cy2lwgz7dq2q2z4ifepdde4l7vyyvrwcx4zjn2ezmtzcanvhv374.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (256, 8), (8, 1)) assert_size_stride(primals_4, (256, ), (1, )) assert_size_stride(primals_5, (256, 256), (256, 1)) assert_size_stride(primals_6, (256, ), (1, )) assert_size_stride(primals_7, (1, 256), (256, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) 
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 1024, grid=grid(1024), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (1, 256), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, primals_6, 1024, grid=grid(1024), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [val], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_8 return (buf6, buf0, buf2, buf4, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class Critic(nn.Module): """ Value Network (state + action --> value) """ def __init__(self, state_size: 'int', action_size: 'int', hidden_size: 'int'=256): super().__init__() self.fc1 = nn.Linear(state_size + action_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, 1) def forward(self, state, action): x = torch.cat((state, action), dim=1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) val = self.out(x) return val def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (256, 8), (8, 1)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (256, 256), (256, 1)) assert_size_stride(primals_6, (256,), (1,)) assert_size_stride(primals_7, (1, 256), (256, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), ( 1, 256), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(1024)](buf4, primals_6, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_8 return buf6, buf0, buf2, buf4, primals_7, primals_5 class CriticNew(nn.Module): """ Value Network (state + action --> value) """ def __init__(self, state_size: 'int', action_size: 'int', hidden_size: 'int'=256): super().__init__() self.fc1 = nn.Linear(state_size + action_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, 
1) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.out.weight primals_8 = self.out.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
jadenvc/puppersim
Critic
false
10,243
[ "Apache-2.0" ]
0
1b3f3e3fc0515d5d6101622e0d729c779debfd32
https://github.com/jadenvc/puppersim/tree/1b3f3e3fc0515d5d6101622e0d729c779debfd32
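For readers tracing the Critic record, call() above reduces to one fused concatenation (triton_poi_fused_cat_0), two matmuls whose bias add and ReLU are fused into triton_poi_fused_relu_1, and a final addmm. A plain-PyTorch restatement of that dataflow, sketched with freshly initialized tensors rather than the module's parameters:

import torch

def critic_call_reference(state, action, w1, b1, w2, b2, w3, b3):
    x = torch.cat((state, action), dim=1)  # triton_poi_fused_cat_0
    x = torch.relu(x @ w1.t() + b1)        # extern mm + triton_poi_fused_relu_1
    x = torch.relu(x @ w2.t() + b2)        # extern mm + triton_poi_fused_relu_1
    return x @ w3.t() + b3                 # extern_kernels.addmm

state, action = torch.rand(4, 4), torch.rand(4, 4)
w1, b1 = torch.rand(256, 8), torch.rand(256)
w2, b2 = torch.rand(256, 256), torch.rand(256)
w3, b3 = torch.rand(1, 256), torch.rand(1)
print(critic_call_reference(state, action, w1, b1, w2, b2, w3, b3).shape)  # (4, 1)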
LandmarkHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + 
(2097152*y1)), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7a/c7auqvqakf5iup6jxhpmvimbxgbhkwojsfkb4ydpzggudkwfecso.py # Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view] # Source node to ATen node mapping: # out_1 => clone # view => view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 10]), kwargs = {}) triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 327680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + (x4), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (20, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (20, ), (1, )) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 20, 64, 64), (81920, 1, 1280, 20)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 20), (81920, 1280, 20, 1), 0); del 
buf1 # reuse buf3 = reinterpret_tensor(buf2, (4, 8192, 10), (81920, 10, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view] triton_poi_fused_clone_view_1.run(buf3, primals_2, 327680, grid=grid(327680), stream=stream0) del primals_2 return (buf3, primals_1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((20, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from itertools import product as product class LandmarkHead(nn.Module): def __init__(self, inchannels=512, num_anchors=2): super(LandmarkHead, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size= (1, 1), stride=1, padding=0) def forward(self, x): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 10) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (20, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 20, 64, 64), (81920, 1, 1280, 20)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 20), (81920, 1280, 20, 1), 0) del buf1 buf3 = reinterpret_tensor(buf2, (4, 8192, 10), (81920, 10, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(327680)](buf3, primals_2, 327680, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 return buf3, primals_1, buf0 class LandmarkHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=2): super(LandmarkHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size= (1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
huigs/retinaface-pytorch
LandmarkHead
false
10,244
[ "MIT" ]
0
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
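The opening kernel of the LandmarkHead record (triton_poi_fused_0) is pure layout conversion: it rewrites the NCHW input to channels-last strides so the 1x1 convolution runs in that layout and the trailing permute + view cost nothing. A short sketch reproducing the same strides with stock PyTorch calls:

import torch

x = torch.rand(4, 512, 64, 64)
x_cl = x.contiguous(memory_format=torch.channels_last)
# Same strides as buf0 in call() above: (2097152, 1, 32768, 512).
print(x_cl.stride())

out = torch.rand(4, 20, 64, 64).contiguous(memory_format=torch.channels_last)
# In channels-last memory, permute(0, 2, 3, 1) is already contiguous, so
# the reshape to (N, H * W * num_anchors, 10) moves no data.
landmarks = out.permute(0, 2, 3, 1).reshape(4, -1, 10)
print(landmarks.shape)  # torch.Size([4, 8192, 10])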
SpatialSELayer1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3u/c3u6zmp5plplv5tjyzlxet5sgcucpeizysbhi7xphxjhdc6kmodq.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + 
tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xe/cxeibrbdtfwskdwocgzxyywkifml4pk6oqbf64lqps7x4oktrzp6.py # Topologically Sorted Source Nodes: [squeeze_tensor, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # squeeze_tensor => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4), (4, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [squeeze_tensor, mul], 
Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_1.run(primals_1, buf1, buf2, 64, grid=grid(64), stream=stream0) return (buf2, primals_1, primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SpatialSELayer1d(nn.Module): def __init__(self, num_channels): """ :param num_channels: No of input channels """ super(SpatialSELayer1d, self).__init__() self.conv = nn.Conv1d(num_channels, 1, 1) self.sigmoid = nn.Sigmoid() def forward(self, input_tensor, weights=None): """ :param weights: weights for few shot learning :param input_tensor: X, shape = (batch_size, num_channels, W) :return: output_tensor """ batch_size, channel, a = input_tensor.size() if weights is not None: weights = torch.mean(weights, dim=0) weights = weights.view(1, channel, 1) out = F.conv1d(input_tensor, weights) else: out = self.conv(input_tensor) squeeze_tensor = self.sigmoid(out) return input_tensor * squeeze_tensor.view(batch_size, 1, a) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4), (4, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(64)](primals_1, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, primals_1, primals_2, buf1 class SpatialSELayer1dNew(nn.Module): def __init__(self, num_channels): """ :param num_channels: No of input channels """ super(SpatialSELayer1dNew, self).__init__() self.conv = nn.Conv1d(num_channels, 1, 1) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ioanvl/1d_squeeze_excitation
SpatialSELayer1d
false
10,245
[ "MIT" ]
0
f422dc4b8e7de6239a6fb7d1688048db5053e733
https://github.com/ioanvl/1d_squeeze_excitation/tree/f422dc4b8e7de6239a6fb7d1688048db5053e733
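The SpatialSELayer1d record fuses two pointwise steps around the extern Conv1d: the bias add (triton_poi_fused_convolution_0) and the broadcasted sigmoid-then-multiply (triton_poi_fused_mul_sigmoid_1). An eager restatement of the same math, sketched with fresh random parameters standing in for the module's:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)       # (batch, channels, width)
weight = torch.rand(1, 4, 1)  # Conv1d(num_channels, 1, kernel_size=1)
bias = torch.rand(1)

gate = torch.sigmoid(F.conv1d(x, weight, bias))  # (4, 1, 4) spatial gate
out = x * gate  # broadcast over the channel dimension, as in the kernel
assert out.shape == (4, 4, 4)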
ClassHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + 
(2097152*y1)), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xy/cxy5t3derdktibljcvhb4n4so7gljpvchrlgaqpvp5yr4i2tjqbi.py # Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view] # Source node to ATen node mapping: # out_1 => clone # view => view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 2]), kwargs = {}) triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64, 64), (16384, 1, 256, 4)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 4), (16384, 256, 4, 1), 0); del buf1 # reuse 
buf3 = reinterpret_tensor(buf2, (4, 8192, 2), (16384, 2, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view] triton_poi_fused_clone_view_1.run(buf3, primals_2, 65536, grid=grid(65536), stream=stream0) del primals_2 return (buf3, primals_1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from itertools import product as product class ClassHead(nn.Module): def __init__(self, inchannels=512, num_anchors=2): super(ClassHead, self).__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) def forward(self, x): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 2) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
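A minimal usage sketch (illustrative, not part of the dataset record), assuming the ClassHead definition above and the shapes from get_inputs():

import torch

head = ClassHead(inchannels=512, num_anchors=2)
x = torch.rand(4, 512, 64, 64)  # (batch, channels, H, W)
out = head(x)
# permute(0, 2, 3, 1) + view packs 64*64 positions * 2 anchors = 8192 rows of 2 class logits
assert out.shape == (4, 8192, 2)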
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64, 64), (16384, 1, 256, 4)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 4), (16384, 256, 4, 1), 0) del buf1 buf3 = reinterpret_tensor(buf2, (4, 8192, 2), (16384, 2, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(65536)](buf3, primals_2, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf3, primals_1, buf0 class ClassHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=2): super(ClassHeadNew, self).__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
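A hedged sanity check (not part of the record) that the compiled wrapper matches the eager module once weights are shared; it assumes a CUDA device, since the kernels above were compiled for 'cuda':

import torch

eager = ClassHead().cuda()
compiled = ClassHeadNew().cuda()
compiled.load_state_dict(eager.state_dict())  # same parameter names: conv1x1.weight / conv1x1.bias
x = torch.rand(4, 512, 64, 64, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)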
huigs/retinaface-pytorch
ClassHead
false
10246
[ "MIT" ]
0
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
CNormalized_Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ef/cefxo2fn2nc6kz3xqsftatxgyvfkkm7bbs7raicfjscwteq4iixh.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, div], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div] # Source node to ATen node mapping: # div => div # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [0]), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {}) triton_poi_fused_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_div_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, div], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_pow_sqrt_sum_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch as th class CNormalized_Linear(th.nn.Module): """Linear layer with a column-wise normalized weight matrix.""" def __init__(self, in_features, out_features, bias=False): """Initialize the layer.""" super(CNormalized_Linear, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = th.nn.Parameter(th.Tensor(out_features, in_features)) if bias: self.bias = th.nn.Parameter(th.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): """Reset the parameters.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input): """Feed-forward through the network.""" return th.nn.functional.linear(input, self.weight.div(self.weight. pow(2).sum(0).sqrt())) def __repr__(self): """For print purposes.""" return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
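A small numeric sketch (illustrative only) of the column normalization that forward() applies to the weight; the fused kernel triton_poi_fused_div_pow_sqrt_sum_0 in this record computes the same values in a single pass:

import torch

w = torch.randn(4, 4)                # (out_features, in_features)
w_norm = w / w.pow(2).sum(0).sqrt()  # each column gets unit L2 norm over the output dimension
print(w_norm.pow(2).sum(0))          # ~ tensor([1., 1., 1., 1.])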
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_pow_sqrt_sum_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class CNormalized_LinearNew(th.nn.Module): """Linear layer with column-wise normalized input matrix.""" def __init__(self, in_features, out_features, bias=False): """Initialize the layer.""" super(CNormalized_LinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = th.nn.Parameter(th.Tensor(out_features, in_features)) if bias: self.bias = th.nn.Parameter(th.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): """Reset the parameters.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def __repr__(self): """For print purposes.""" return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None) + ')' def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
edgarvardanyan/CausalDiscoveryToolbox
CNormalized_Linear
false
10247
[ "MIT" ]
0
5497a400440b49a3af14a0c7512bcdd307c9285d
https://github.com/edgarvardanyan/CausalDiscoveryToolbox/tree/5497a400440b49a3af14a0c7512bcdd307c9285d
ChannelSELayer1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xj/cxjc6n35n4dbdwuwsgoewpxau7qnxv2giwhqraobhv67igzij74n.py # Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean] # Source node to ATen node mapping: # squeeze_tensor => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = 
tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3p/c3pvtte4adxbodvqq4iab6zximg555lpk2lopxwzpt4fva4eetqt.py # Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # fc_out_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lu/cluoynjdcltxwpjubrhgdo7oh6p5oiqgebxryzgynymp3qkjpscr.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1) del primals_2 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0) return (buf4, primals_1, buf0, buf2, buf3, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ChannelSELayer1d(nn.Module): def __init__(self, num_channels, reduction_ratio=4): """ :param num_channels: Number of input channels :param reduction_ratio: By how much the num_channels should be reduced """ super(ChannelSELayer1d, self).__init__() num_channels_reduced = num_channels // reduction_ratio self.reduction_ratio = reduction_ratio self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True) self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True) self.activ_1 = nn.ReLU() self.activ_2 = nn.Sigmoid() def forward(self, input_tensor): """ :param input_tensor: X, shape = (batch_size, num_channels, H) :return: output tensor """ batch_size, num_channels, _H = input_tensor.size() squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean( dim=2) fc_out_1 = self.activ_1(self.fc1(squeeze_tensor)) fc_out_2 = self.activ_2(self.fc2(fc_out_1)) return input_tensor * fc_out_2.view(batch_size, num_channels, 1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
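For reference (illustrative, not part of the record), a minimal sketch of the squeeze-and-excitation gating, using the shapes from get_inputs():

import torch

se = ChannelSELayer1d(num_channels=4, reduction_ratio=4)
x = torch.rand(4, 4, 4)    # (batch, channels, length)
y = se(x)
assert y.shape == x.shape  # each channel of x is rescaled by a sigmoid gate in (0, 1)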
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1, 4 ), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(4)](buf2, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](primals_1, buf3, buf4, 64, XBLOCK= 64, num_warps=1, num_stages=1) return buf4, primals_1, buf0, buf2, buf3, primals_4 class ChannelSELayer1dNew(nn.Module): def __init__(self, num_channels, reduction_ratio=4): """ :param num_channels: No of input channels :param reduction_ratio: By how much should the num_channels should be reduced """ super(ChannelSELayer1dNew, self).__init__() num_channels_reduced = 
num_channels // reduction_ratio self.reduction_ratio = reduction_ratio self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True) self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True) self.activ_1 = nn.ReLU() self.activ_2 = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ioanvl/1d_squeeze_excitation
ChannelSELayer1d
false
10248
[ "MIT" ]
0
f422dc4b8e7de6239a6fb7d1688048db5053e733
https://github.com/ioanvl/1d_squeeze_excitation/tree/f422dc4b8e7de6239a6fb7d1688048db5053e733
Linear3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/64/c64ahxnpt5ixqrlolbug3qf6y4u2zqmcjekif2yu4ba4hcze2fom.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ri/cripmqky3lo3skozwggzdw7u7ybokgg2w2o3vcesam3ywnqu73dg.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] # Source node to ATen node mapping: # output_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %unsqueeze), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 
'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_2, buf1, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf3, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch as th from torch.nn import Parameter def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3D(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = Linear3D(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3D, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj_matrix=None, permutation_matrix=None): input_ = [input] if input.dim() == 2: if permutation_matrix is not None: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, permutation_matrix.shape[1]])) elif hasattr(self, 'noise'): input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features - 1])) else: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features])) if adj_matrix is not None and permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ (adj_matrix.t(). unsqueeze(2) * permutation_matrix)).transpose(0, 1)) elif adj_matrix is not None: input_.append(input_[-1] * adj_matrix.t().unsqueeze(0)) elif permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ permutation_matrix).t() ) if hasattr(self, 'noise'): self.noise.normal_() input_.append(th.cat([input_[-1], self.noise], 2)) return functional_linear3d(input_[-1], self.weight, self.bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'in_features': 4, 'out_features': 4}]
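A usage sketch (illustrative, not from the repo) matching the corrected docstring example; note that forward() expands a 2D input across all channels:

import torch

m = Linear3D(3, 20, 30)       # channels=3, in_features=20, out_features=30
x3 = torch.randn(128, 3, 20)  # explicit per-channel input
assert m(x3).shape == (128, 3, 30)
x2 = torch.randn(128, 20)     # 2D input is broadcast to all 3 channels
assert m(x2).shape == (128, 3, 30)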
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch as th from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_2, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_2[grid(256)](buf3, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0) def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3DNew(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. 
Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3DNew, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
edgarvardanyan/CausalDiscoveryToolbox
Linear3D
false
10249
[ "MIT" ]
0
5497a400440b49a3af14a0c7512bcdd307c9285d
https://github.com/edgarvardanyan/CausalDiscoveryToolbox/tree/5497a400440b49a3af14a0c7512bcdd307c9285d
GCNLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fm/cfmazf5iwuuaknliyiq4obsjt4roufymjs6ha5ei6aoyekotoo5q.py # Topologically Sorted Source Nodes: [out_1, prelu], Original ATen: [aten.add, aten._prelu_kernel] # Source node to ATen node mapping: # out_1 => add # prelu => gt, mul, where # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %primals_4), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %add), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_add_0 = async_compile.triton('triton_poi_fused__prelu_kernel_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__prelu_kernel_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [feat], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1, prelu], Original ATen: [aten.add, aten._prelu_kernel] stream0 = get_raw_stream(0) triton_poi_fused__prelu_kernel_add_0.run(buf1, primals_4, primals_5, buf2, 64, grid=grid(64), stream=stream0) return (buf2, primals_4, primals_5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GCNLayer(nn.Module): def __init__(self, in_ft, out_ft, bias=True): super(GCNLayer, self).__init__() self.fc = nn.Linear(in_ft, out_ft, bias=False) self.act = nn.PReLU() if bias: self.bias = nn.Parameter(torch.FloatTensor(out_ft)) self.bias.data.fill_(0.0) else: self.register_parameter('bias', None) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, feat, adj): feat = self.fc(feat) out = torch.bmm(adj, feat) if self.bias is not None: out += self.bias return self.act(out) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_ft': 4, 'out_ft': 4}]
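The eager forward above reduces to PReLU(bmm(adj, feat @ W.T) + b), which is exactly what the fused Triton kernel computes after the two extern matmuls. A minimal pure-tensor sketch, with shapes taken from get_inputs()/get_init_inputs() and initialisations mirroring weights_init and nn.PReLU's default (the concrete tensors here are illustrative, not from the record):

import torch

# Shapes from get_inputs()/get_init_inputs(): batch 4, 4 nodes, 4 features.
feat = torch.rand(4, 4, 4)        # node features
adj = torch.rand(4, 4, 4)         # one dense adjacency per batch element
W = torch.empty(4, 4)             # fc weight (out_ft x in_ft); fc has bias=False
torch.nn.init.xavier_uniform_(W)  # as in weights_init
bias = torch.zeros(4)             # self.bias is filled with 0.0 in __init__
alpha = torch.tensor(0.25)        # nn.PReLU's default initial slope

h = feat @ W.t()                               # self.fc(feat)
out = torch.bmm(adj, h) + bias                 # neighbourhood aggregation + bias
out = torch.where(out > 0, out, alpha * out)   # PReLU, matching the fused kernel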
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__prelu_kernel_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), ( 16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused__prelu_kernel_add_0[grid(64)](buf1, primals_4, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, primals_4, primals_5, reinterpret_tensor(primals_2, (16, 4 ), (4, 1), 0), buf1, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0) class GCNLayerNew(nn.Module): def __init__(self, in_ft, out_ft, bias=True): super(GCNLayerNew, self).__init__() self.fc = nn.Linear(in_ft, out_ft, bias=False) self.act = nn.PReLU() if bias: self.bias = nn.Parameter(torch.FloatTensor(out_ft)) self.bias.data.fill_(0.0) else: self.register_parameter('bias', None) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1): primals_4 = self.bias primals_1 = self.fc.weight primals_5 = self.act.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jaynee156/GNN-thesis
GCNLayer
false
10,250
[ "MIT" ]
0
fe8a731698dedb6cf76f7130658a646664a79b09
https://github.com/jaynee156/GNN-thesis/tree/fe8a731698dedb6cf76f7130658a646664a79b09
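Since GCNLayerNew shares its parameter layout (fc.weight, bias, act.weight) with the eager GCNLayer, the two can be checked against each other directly. A sketch, assuming the classes and get_inputs above live in a hypothetical module gcn_record, and that a CUDA device is available, since call() pins all buffers to cuda:0:

import torch
from gcn_record import GCNLayer, GCNLayerNew, get_inputs  # hypothetical module holding the code above

eager = GCNLayer(in_ft=4, out_ft=4).cuda()
compiled = GCNLayerNew(in_ft=4, out_ft=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share a single random init

feat, adj = [t.cuda() for t in get_inputs()]
torch.testing.assert_close(eager(feat, adj), compiled(feat, adj))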
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/h2/ch2qmvn2pnakfb2rmi27soi6fdit5okdksuuroqly6h4mehoukws.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 23040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 576) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lr/clr4dqzywalbootuyerjzaulln46meffyy4s6x4ufxgovhz3vvt3.py # Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # x_1 => relu # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 5760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x3 = (xindex // 12) x2 = (xindex // 1440) x4 = xindex % 1440 x5 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = 
triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + (1536*x2)), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kv/ckvxoo67wykkix56guvhafnmoj7ybofi4256vgkv6dblf6kd6f57.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 20 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3v/c3vjs7cwub6yw4cpmxvqbzjabjh3xu4qeeml6jobk35deiu6msr5.py # Topologically Sorted Source Nodes: [max_pool2d_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # x_2 => relu_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {}) # %le_1 : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp18, xmask) tl.store(out_ptr2 + (x2), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xi/cxiouqdy3aqmwg2p7remcu57ny7vnw4f263cpqtavvo5sya77u64.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = 
async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/i3/ci3gjxynws4b6snihkmy4mi7mtg3xhsmd2rvhtmsnpenwlazlbbl.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_5 = async_compile.triton('triton_per_fused__log_softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_3, (10, ), (1, )) assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1)) assert_size_stride(primals_5, (20, ), (1, )) assert_size_stride(primals_6, (50, 320), (320, 1)) assert_size_stride(primals_7, (50, ), (1, )) assert_size_stride(primals_8, (10, 50), (50, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 24, 24), (5760, 576, 24, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 23040, grid=grid(23040), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 10, 12, 12), (1536, 144, 12, 1), torch.int8) buf3 = empty_strided_cuda((4, 10, 12, 12), (1440, 144, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf1, buf2, buf3, 5760, grid=grid(5760), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 8, 8), (1280, 64, 8, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 5120, grid=grid(5120), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.int8) buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [max_pool2d_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3.run(buf5, buf6, buf7, buf14, 1280, grid=grid(1280), stream=stream0) buf8 = empty_strided_cuda((4, 50), (50, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 200, grid=grid(200), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_5.run(buf10, buf13, 4, 10, grid=grid(4), stream=stream0) del buf10 return (buf13, primals_1, primals_2, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 320), (320, 1), 0), buf9, buf13, primals_8, primals_6, buf14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((20, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((50, 320), (320, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 50), (50, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.utils.data.distributed import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = x.view(x.shape[0], 1, 28, 28) x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 1, 28, 28])] def get_init_inputs(): return [[], {}]
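Two things stand out in this record. First, neither self.conv2_drop nor F.dropout appears anywhere in the compiled call(), consistent with the graph having been traced with dropout inactive, where both reduce to the identity. Second, triton_per_fused__log_softmax_5 is the standard numerically stable log-softmax. The same row-wise computation in plain PyTorch (a sketch; the helper name is illustrative):

import torch

def log_softmax_stable(x: torch.Tensor) -> torch.Tensor:
    m = x.max(dim=1, keepdim=True).values       # tmp4: row maximum
    s = (x - m).exp().sum(dim=1, keepdim=True)  # tmp10: sum of shifted exps
    return (x - m) - s.log()                    # tmp12: (x - max) - log(sum)

x = torch.randn(4, 10)  # same shape as buf10 in call()
torch.testing.assert_close(log_softmax_stable(x), torch.log_softmax(x, dim=1))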
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch.utils.data.distributed import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 23040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 576 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 5760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x3 = xindex // 12 x2 = xindex // 1440 x4 = xindex % 1440 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x3), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + 1536 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, 
tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp18, xmask) tl.store(out_ptr2 + x2, tmp20, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_3, (10,), (1,)) assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (50, 320), (320, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (10, 50), (50, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 24, 24), (5760, 576, 24, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(23040)](buf1, primals_3, 23040, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10, 12, 12), (1536, 144, 12, 1), torch.int8) buf3 = empty_strided_cuda((4, 10, 12, 12), (1440, 144, 12, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(5760)](buf1, buf2, buf3, 5760, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 8, 8), (1280, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(5120)](buf5, primals_5, 5120, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), 
torch.int8) buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32 ) buf14 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid (1280)](buf5, buf6, buf7, buf14, 1280, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(200)](buf9, primals_7, 200, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_5[grid(4)](buf10, buf13, 4, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf10 return (buf13, primals_1, primals_2, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 320), (320, 1), 0), buf9, buf13, primals_8, primals_6, buf14) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
iquintero/sagemaker-pytorch-container
Net
false
10,251
[ "Apache-2.0" ]
0
70f64c87e549ae833d7f2ef2f15f01542ff5678e
https://github.com/iquintero/sagemaker-pytorch-container/tree/70f64c87e549ae833d7f2ef2f15f01542ff5678e
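The assert_size_stride calls in this record pin down the whole shape flow: each 5x5 valid convolution shrinks each spatial dimension by 4 and each 2x2 pool halves it, which is where fc1's in_features of 320 comes from (20 channels x 4 x 4). A runnable recap with fresh, randomly initialised convs (only the shapes matter here):

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 28, 28)
x = F.max_pool2d(torch.nn.Conv2d(1, 10, kernel_size=5)(x), 2)   # (4, 10, 12, 12)
x = F.max_pool2d(torch.nn.Conv2d(10, 20, kernel_size=5)(x), 2)  # (4, 20, 4, 4)
assert x.flatten(1).shape == (4, 320)  # 20 * 4 * 4 = 320, fc1's in_features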
ValueFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3m/c3mzp3ti73rkr2ycpo5n5vkpamkp63miruoys5loyr5qw563wde6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200, ), (1, )) assert_size_stride(primals_6, (1, 200), (200, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 12800, grid=grid(12800), stream=stream0) del primals_2 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 12800, grid=grid(12800), stream=stream0) del buf3 del primals_5 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 1), (1, 200), 0), alpha=1, beta=1, out=buf7) del primals_7 return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((200, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class ValueFunction(nn.Module): """fully connected 200x200 hidden layers""" def __init__(self, state_dim): super(ValueFunction, self).__init__() self.fc1 = nn.Linear(state_dim, 200) self.fc2 = nn.Linear(200, 200) self.out = nn.Linear(200, 1) def forward(self, x): """return: scalar value""" x = F.leaky_relu(self.fc1(x), 0.2) x = F.leaky_relu(self.fc2(x), 0.2) return self.out(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4}]
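The single Triton kernel in this record, triton_poi_fused_leaky_relu_0, fuses the linear layer's bias add with LeakyReLU(negative_slope=0.2) and additionally stores the > 0 mask (buf1/buf4) for the backward pass. The forward half in plain PyTorch (a sketch; the helper name is illustrative):

import torch

def fused_bias_leaky_relu(x: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    y = x + bias                            # tmp2 = tmp0 + tmp1
    return torch.where(y > 0, y, 0.2 * y)   # tmp7 = where(tmp4, tmp2, tmp6)

x = torch.randn(64, 200)  # matmul output, as in buf0
b = torch.randn(200)      # fc1 bias
torch.testing.assert_close(
    fused_bias_leaky_relu(x, b),
    torch.nn.functional.leaky_relu(x + b, negative_slope=0.2),
)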
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (1, 200), (200, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1, buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4, buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 1), (1, 200), 0), alpha=1, beta=1, out=buf7) del primals_7 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0 ), primals_6, primals_4 class ValueFunctionNew(nn.Module): """fully connected 200x200 hidden layers""" def __init__(self, state_dim): super(ValueFunctionNew, self).__init__() self.fc1 = nn.Linear(state_dim, 200) self.fc2 = nn.Linear(200, 200) self.out = nn.Linear(200, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
himanshusahni/task-biased-url
ValueFunction
false
10,252
[ "MIT" ]
0
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
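A pattern common to the call() functions in the records above: reinterpret_tensor changes shape and strides without copying, e.g. ValueFunction's contiguous (4, 4, 4, 4) input is handed to extern_kernels.mm as a (64, 4) matrix. For a contiguous tensor that is just a zero-copy view, so a flat matmul agrees with nn.Linear applied over the trailing feature dimension. A sketch with a hypothetical weight:

import torch

x = torch.rand(4, 4, 4, 4)   # same shape/strides as primals_3
w = torch.rand(200, 4)       # stands in for fc1.weight

flat = x.reshape(64, 4)                    # zero-copy view for contiguous x
y = (flat @ w.t()).reshape(4, 4, 4, 200)   # fc1 applied at every position
torch.testing.assert_close(y, torch.nn.functional.linear(x, w))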
CrossAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py # Topologically Sorted Source Nodes: [x_q], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_q => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py # Topologically Sorted Source Nodes: [x_q], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_q => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ah/cahpqo3o7hv3q647n5lretlqvfljlubj4ic7gscxws4yvkm5jzff.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul_4 # Graph fragment: # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub_2 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), 
kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (12, 4), (4, 1)) assert_size_stride(primals_8, (12, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [x_q], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [x_kv], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_0.run(primals_6, buf2, buf3, 4, grid=grid(4), stream=stream0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_q], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf4, 16, grid=grid(16), stream=stream0) del buf0 del buf1 del primals_1 del primals_2 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_kv], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_6, buf2, buf3, primals_4, primals_5, buf6, 16, grid=grid(16), stream=stream0) del buf2 del buf3 del primals_4 del primals_5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_8, (4, ), (1, ), 4), buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_8, (4, ), (1, ), 8), buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf8) buf9 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf9, primals_8, 
16, grid=grid(16), stream=stream0) del primals_8 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf9, reinterpret_tensor(buf7, (4, 1, 4), (1, 1, 4), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf10, buf11, 64, grid=grid(64), stream=stream0) buf12 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf11, buf12, 64, grid=grid(64), stream=stream0) del buf11 buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf12, reinterpret_tensor(buf8, (4, 4, 1), (1, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf13, buf14, 4, 4, grid=grid(4, 4), stream=stream0) buf15 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (4, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_10 return (buf15, primals_3, primals_6, buf4, buf6, buf12, reinterpret_tensor(buf14, (4, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf8, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf9, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (4, 1), 32), reinterpret_tensor(primals_7, (4, 4), (4, 1), 16), reinterpret_tensor(primals_7, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class MultiHeadAttention(nn.Module):
    """
    Multi head attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same
            channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`float`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
            num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
            num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
            dropout=dropout, batch_first=True)

    def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
        """
        Forward function.

        Args:
            x_q (`Tensor`): Query embeddings.
            x_kv (`Tensor`): Key embeddings. Key equals value.
            pad_mask (`Tensor`): Padding mask.
            attn_mask (`Tensor`): Attention mask.
        """
        return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
            attn_mask=attn_mask)[0]


class CrossAttention(nn.Module):
    """
    Cross attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same
            channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`float`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
            num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.q_norm = nn.LayerNorm(num_q_channels)
        self.kv_norm = nn.LayerNorm(num_kv_channels)
        self.attention = MultiHeadAttention(num_q_channels=num_q_channels,
            num_kv_channels=num_kv_channels, num_heads=num_heads,
            dropout=dropout)

    def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
        x_q = self.q_norm(x_q)
        x_kv = self.kv_norm(x_kv)
        return self.attention(x_q, x_kv, pad_mask=pad_mask,
            attn_mask=attn_mask)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_q_channels': 4, 'num_kv_channels': 4, 'num_heads': 4,
        'dropout': 0.5}]
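A minimal smoke test for this entry, assuming only the CrossAttention class and the get_inputs/get_init_inputs harness above (and a reasonably recent PyTorch that accepts unbatched 2-D inputs to nn.MultiheadAttention); the eval() call and assert are illustrative additions, not part of the original module:

import torch

def smoke_test():
    # Instantiate from this entry's own init harness; eval() disables the
    # 0.5 attention dropout so repeated runs are deterministic.
    init_args, init_kwargs = get_init_inputs()
    module = CrossAttention(*init_args, **init_kwargs).eval()
    x_q, x_kv = get_inputs()
    with torch.no_grad():
        out = module(x_q, x_kv)
    # A 2-D input to a batch_first nn.MultiheadAttention is treated as an
    # unbatched (seq_len, embed_dim) sequence, so the output matches x_q.
    assert out.shape == x_q.shape
    return out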
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (12, 4), (4, 1)) assert_size_stride(primals_8, (12,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_native_layer_norm_0[grid(4)](primals_6, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4 ), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_6, buf2, buf3, primals_4, primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 del buf3 del primals_4 del primals_5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 4), buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 16), alpha= 1, beta=1, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 8), buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf8) buf9 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0) del buf5 triton_poi_fused_mul_2[grid(16)](buf9, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
        torch.float32)
        extern_kernels.bmm(buf9, reinterpret_tensor(buf7, (4, 1, 4), (1,
            1, 4), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(64)](buf10, buf11, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf12 = buf10
        del buf10
        triton_poi_fused__softmax_4[grid(64)](buf11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf11
        buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf12, reinterpret_tensor(buf8, (4, 4, 1), (1,
            4, 1), 0), out=buf13)
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_5[grid(4, 4)](buf13, buf14, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0)
        del buf13
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (4, 4),
            (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf15)
        del primals_10
    return (buf15, primals_3, primals_6, buf4, buf6, buf12,
        reinterpret_tensor(buf14, (4, 4), (4, 1), 0), primals_9,
        reinterpret_tensor(buf8, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf9, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 1), 0),
        reinterpret_tensor(primals_7, (4, 4), (4, 1), 32),
        reinterpret_tensor(primals_7, (4, 4), (4, 1), 16),
        reinterpret_tensor(primals_7, (4, 4), (4, 1), 0))


class MultiHeadAttention(nn.Module):
    """
    Multi head attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same
            channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`float`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
            num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
            num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
            dropout=dropout, batch_first=True)

    def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
        """
        Forward function.

        Args:
            x_q (`Tensor`): Query embeddings.
            x_kv (`Tensor`): Key embeddings. Key equals value.
            pad_mask (`Tensor`): Padding mask.
            attn_mask (`Tensor`): Attention mask.
        """
        return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
            attn_mask=attn_mask)[0]


class CrossAttentionNew(nn.Module):
    """
    Cross attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same
            channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`float`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
            num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.q_norm = nn.LayerNorm(num_q_channels)
        self.kv_norm = nn.LayerNorm(num_kv_channels)
        self.attention = MultiHeadAttention(num_q_channels=num_q_channels,
            num_kv_channels=num_kv_channels, num_heads=num_heads,
            dropout=dropout)

    def forward(self, input_0, input_1):
        primals_1 = self.q_norm.weight
        primals_2 = self.q_norm.bias
        primals_4 = self.kv_norm.weight
        primals_5 = self.kv_norm.bias
        primals_7 = self.attention.attention.in_proj_weight
        primals_8 = self.attention.attention.in_proj_bias
        primals_9 = self.attention.attention.out_proj.weight
        primals_10 = self.attention.attention.out_proj.bias
        # primals_3 and primals_6 carry the two (4, 4) activation inputs
        # expected by call(); primals_9 is the (4, 4) output projection
        # weight consumed by the final addmm.
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10])
        return output[0]
jennyli-z/towhee
CrossAttention
false
10,253
[ "Apache-2.0" ]
0
55c55fd961229575b75eae269b55090c839f8dcd
https://github.com/jennyli-z/towhee/tree/55c55fd961229575b75eae269b55090c839f8dcd
DenseBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/gk/cgkw7mtnlzc2x4r4dlaxhd27twdsy4zp5nunyiqlzxzxlut26kgd.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [2], [2], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 6) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/n4/cn4nrao6ng2dt25fyig6ikjf3xz5iqxb22ofakq7pljb2sg3u6rd.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %mul], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 8 x0 = xindex % 4 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (6*((-4) + x1)) + (24*x2)), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + (6*((-4) + x1)) + (24*x2)), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), 
dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 96, grid=grid(96), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 6), (24, 6, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf3, primals_5, 96, grid=grid(96), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_3, buf1, buf3, buf4, 128, grid=grid(128), stream=stream0) return (buf4, primals_1, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class CausalConv1d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels,
            kernel_size, padding=self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


class DenseBlock(nn.Module):

    def __init__(self, in_channels, filters, dilation=2):
        super(DenseBlock, self).__init__()
        self.causal_conv1 = CausalConv1d(in_channels, filters,
            dilation=dilation)
        self.causal_conv2 = CausalConv1d(in_channels, filters,
            dilation=dilation)

    def forward(self, minibatch):
        tanh = torch.tanh(self.causal_conv1(minibatch))
        sig = torch.sigmoid(self.causal_conv2(minibatch))
        out = torch.cat([minibatch, tanh * sig], dim=1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'filters': 4}]
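A short shape check, assuming the DenseBlock class and harness above: the dense connection grows channels from in_channels to in_channels + filters while the causal slicing preserves the temporal length:

import torch

block = DenseBlock(in_channels=4, filters=4)
x = torch.rand(4, 4, 4)  # (batch, channels, time)
out = block(x)
assert out.shape == (4, 8, 4)  # 4 input channels + 4 gated filter channels
assert torch.equal(out[:, :4], x)  # the input passes through unchanged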
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 6), (24, 6, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(96)](buf3, primals_5, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_3, buf1, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_3, primals_4, buf1, buf3 class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlockNew(nn.Module): def __init__(self, in_channels, filters, dilation=2): super(DenseBlockNew, 
self).__init__() self.causal_conv1 = CausalConv1d(in_channels, filters, dilation= dilation) self.causal_conv2 = CausalConv1d(in_channels, filters, dilation= dilation) def forward(self, input_0): primals_1 = self.causal_conv1.causal_conv.weight primals_2 = self.causal_conv1.causal_conv.bias primals_4 = self.causal_conv2.causal_conv.weight primals_5 = self.causal_conv2.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
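A hedged parity sketch for this entry, assuming a CUDA device (call() allocates CUDA buffers): copying the eager weights into DenseBlockNew should make both paths agree; the 1e-6 tolerance is an assumption:

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = DenseBlock(in_channels=4, filters=4).cuda()
    fused = DenseBlockNew(in_channels=4, filters=4).cuda()
    fused.load_state_dict(eager.state_dict())  # submodule names line up
    x = torch.rand(4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-6)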
heyitsmine/FewRel
DenseBlock
false
10,254
[ "MIT" ]
0
2a2b8ae471298d9eb3557796a085c23b21982fb2
https://github.com/heyitsmine/FewRel/tree/2a2b8ae471298d9eb3557796a085c23b21982fb2
CausalConv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/gk/cgkw7mtnlzc2x4r4dlaxhd27twdsy4zp5nunyiqlzxzxlut26kgd.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [2], [2], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 6) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 96, grid=grid(96), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class CausalConv1d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels,
            kernel_size, padding=self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
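A quick causality check, assuming the CausalConv1d class above: left padding of size dilation plus the trailing slice means output step t sees only inputs at or before t, so zeroing future steps must not change earlier outputs:

import torch

torch.manual_seed(0)
conv = CausalConv1d(in_channels=4, out_channels=4)
x = torch.rand(1, 4, 8)
y_full = conv(x)
x_cut = x.clone()
x_cut[:, :, 5:] = 0.0  # perturb only the "future" time steps
y_cut = conv(x_cut)
# With kernel_size=2 and dilation=2, output t depends on x[t] and x[t-2].
assert torch.allclose(y_full[:, :, :5], y_cut[:, :, :5])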
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0 ), primals_1, primals_3 class CausalConv1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1dNew, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, input_0): primals_1 = self.causal_conv.weight primals_2 = self.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
heyitsmine/FewRel
CausalConv1d
false
10,255
[ "MIT" ]
0
2a2b8ae471298d9eb3557796a085c23b21982fb2
https://github.com/heyitsmine/FewRel/tree/2a2b8ae471298d9eb3557796a085c23b21982fb2
QValueFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ni/cnim4hby6ukzrr5hpnqhyes24yh4u2u2me5drgtw5svnox5lhi5h.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_1 => gt, mul, where # Graph fragment: # %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (200, 8), (8, 1)) assert_size_stride(primals_4, (200, ), (1, )) assert_size_stride(primals_5, (200, 200), (200, 1)) 
assert_size_stride(primals_6, (200, ), (1, )) assert_size_stride(primals_7, (1, 200), (200, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 200), (1, 8), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 200), (200, 1), torch.bool) buf3 = empty_strided_cuda((4, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, buf3, 800, grid=grid(800), stream=stream0) del primals_4 buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (200, 200), (1, 200), 0), out=buf4) buf5 = empty_strided_cuda((4, 200), (200, 1), torch.bool) buf6 = empty_strided_cuda((4, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf4, primals_6, buf5, buf6, 800, grid=grid(800), stream=stream0) del buf4 del primals_6 buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7, (200, 1), (1, 200), 0), alpha=1, beta=1, out=buf8) del primals_8 return (buf8, buf0, buf2, buf3, buf5, buf6, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((200, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class QValueFunction(nn.Module):
    """fully connected 200x200 hidden layers"""

    def __init__(self, state_dim, action_dim):
        super(QValueFunction, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 200)
        self.fc2 = nn.Linear(200, 200)
        self.out = nn.Linear(200, 1)

    def forward(self, s, a):
        """return: scalar value"""
        x = torch.cat((s, a), dim=1)
        x = F.leaky_relu(self.fc1(x), 0.2)
        x = F.leaky_relu(self.fc2(x), 0.2)
        return self.out(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 200
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (200, 8), (8, 1))
    assert_size_stride(primals_4, (200,), (1,))
    assert_size_stride(primals_5, (200, 200), (200, 1))
    assert_size_stride(primals_6, (200,), (1,))
    assert_size_stride(primals_7, (1, 200), (200, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 200), (1,
            8), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 200), (200, 1), torch.bool)
        buf3 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
        triton_poi_fused_leaky_relu_1[grid(800)](buf1, primals_4, buf2,
            buf3, 800, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_4
        buf4 = buf1
        del buf1
        extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (200, 200),
            (1, 200), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 200), (200, 1), torch.bool)
        buf6 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
        triton_poi_fused_leaky_relu_1[grid(800)](buf4, primals_6, buf5,
            buf6, 800, XBLOCK=256, num_warps=4, num_stages=1)
        del buf4
        del primals_6
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7,
            (200, 1), (1, 200), 0), alpha=1, beta=1, out=buf8)
        del primals_8
    return buf8, buf0, buf2, buf3, buf5, buf6, primals_7, primals_5


class QValueFunctionNew(nn.Module):
    """fully connected 200x200 hidden layers"""

    def __init__(self, state_dim, action_dim):
        super(QValueFunctionNew, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 200)
        self.fc2 = nn.Linear(200, 200)
        self.out = nn.Linear(200, 1)

    def forward(self, input_0, input_1):
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.out.weight
        primals_8 = self.out.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
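A minimal consistency sketch, assuming a CUDA device is available: QValueFunctionNew above keeps the eager module's parameter layout, so after copying weights the two should agree numerically. The tolerance below is an assumption, not a value from this entry.

# Sketch: eager vs. compiled wrapper; assumes CUDA, atol chosen arbitrarily.
eager = QValueFunction(4, 4).cuda()
compiled = QValueFunctionNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical fc1/fc2/out weights
s, a = (t.cuda() for t in get_inputs())
with torch.no_grad():
    assert torch.allclose(eager(s, a), compiled(s, a), atol=1e-5)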
himanshusahni/task-biased-url
QValueFunction
false
10256
[ "MIT" ]
0
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
Gate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/dw/cdwapf4spszv7xrakefo7xfxxiomqve2jhc3fgichvmsioyx2bqg.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] # Source node to ATen node mapping: # z => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %mul, %sub], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tmp21 = tl.full([1], 16, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tl.load(in_ptr0 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + ((4*x1) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/x4/cx4biuchnddvcnztblwsbxqa3jigqmx4dwtuiemykp7ce6bvxpvm.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 16), (16, 1)) assert_size_stride(primals_4, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 1), (1, 16), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 return (buf2, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Gate(nn.Module):

    def __init__(self, input_dim):
        super(Gate, self).__init__()
        self.linear = nn.Linear(input_dim * 4, 1, bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y):
        z = torch.cat([x, y, x * y, x - y], dim=2)
        return self.sigmoid(self.linear(z))


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4}]
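A short usage sketch for the eager module above, using only names defined in this entry; with input_dim=4 the concatenation [x, y, x*y, x-y] yields the 16 features the linear layer expects.

x, y = get_inputs()
gate = Gate(**get_init_inputs()[1])  # Gate(input_dim=4)
g = gate(x, y)                       # sigmoid output, shape (4, 4, 1)
print(g.shape)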
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 * tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp14, tmp17, tmp18)
    tmp20 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp25 = tmp23 - tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp20, tmp25, tmp26)
    tmp28 = tl.where(tmp14, tmp19, tmp27)
    tmp29 = tl.where(tmp9, tmp10, tmp28)
    tmp30 = tl.where(tmp4, tmp5, tmp29)
    tl.store(out_ptr0 + x2, tmp30, xmask)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (1, 16), (16, 1))
    assert_size_stride(primals_4, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_3, (16, 1), (1, 16), 0), out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0)
        del buf1
        triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
    return buf2, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf2


class GateNew(nn.Module):

    def __init__(self, input_dim):
        super(GateNew, self).__init__()
        self.linear = nn.Linear(input_dim * 4, 1, bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0, input_1):
        primals_3 = self.linear.weight
        primals_4 = self.linear.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
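A minimal sketch of invoking call() directly, assuming a CUDA device: the primals list must satisfy the assert_size_stride checks at the top of call() (two (4, 4, 4) inputs plus the linear weight and bias), and the first element of the returned tuple is the gate output.

# Assumes CUDA; the list is consumed (args.clear()) inside call().
m = GateNew(4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, device='cuda')
out = call([x, y, m.linear.weight, m.linear.bias])[0]
print(out.shape)  # torch.Size([4, 4, 1])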
hgrhgy/NumSeq2SQL
Gate
false
10257
[ "MIT" ]
0
6f22fdf108736f979afa2dbd3af14aa9ad4718aa
https://github.com/hgrhgy/NumSeq2SQL/tree/6f22fdf108736f979afa2dbd3af14aa9ad4718aa
ChannelSpatialSELayer1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xj/cxjc6n35n4dbdwuwsgoewpxau7qnxv2giwhqraobhv67igzij74n.py # Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean] # Source node to ATen node mapping: # squeeze_tensor => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = 
tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/eb/cebpbupczy3a7z6yffgxybumq5trdt3jp5hxwuoo6w6cunzz7d7h.py # Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # fc_out_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/of/cofp3y7hx4rouft2pepcjvbgcrlj6tozkfzrtktr6gvdantyfca2.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4s/c4sbsf2l5uzvgqh3tfrqr7jxgu5x74lqjmbfqtnzieuqqb2uzsfw.py # Topologically Sorted Source Nodes: [mul, squeeze_tensor_1, mul_1, max_1], Original ATen: [aten.mul, aten.sigmoid, aten.maximum] # Source node to ATen node mapping: # max_1 => maximum # mul => mul # mul_1 => mul_1 # squeeze_tensor_1 => sigmoid_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid_1), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_maximum_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_maximum_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_maximum_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp0 * tmp5 tmp7 = triton_helpers.maximum(tmp3, tmp6) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2, ), (1, )) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_3, 8, grid=grid(8), stream=stream0) del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3) del primals_5 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 4), (4, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 16, grid=grid(16), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, squeeze_tensor_1, mul_1, max_1], Original ATen: [aten.mul, aten.sigmoid, aten.maximum] triton_poi_fused_maximum_mul_sigmoid_3.run(primals_1, buf3, buf5, buf6, 64, grid=grid(64), stream=stream0) return (buf6, primals_1, primals_6, buf0, buf2, buf3, buf5, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), 
device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class ChannelSELayer1d(nn.Module):

    def __init__(self, num_channels, reduction_ratio=4):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSELayer1d, self).__init__()
        num_channels_reduced = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
        self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
        self.activ_1 = nn.ReLU()
        self.activ_2 = nn.Sigmoid()

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H)
        :return: output tensor
        """
        batch_size, num_channels, _H = input_tensor.size()
        squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(
            dim=2)
        fc_out_1 = self.activ_1(self.fc1(squeeze_tensor))
        fc_out_2 = self.activ_2(self.fc2(fc_out_1))
        return input_tensor * fc_out_2.view(batch_size, num_channels, 1)


class SpatialSELayer1d(nn.Module):

    def __init__(self, num_channels):
        """
        :param num_channels: No of input channels
        """
        super(SpatialSELayer1d, self).__init__()
        self.conv = nn.Conv1d(num_channels, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_tensor, weights=None):
        """
        :param weights: weights for few shot learning
        :param input_tensor: X, shape = (batch_size, num_channels, W)
        :return: output_tensor
        """
        batch_size, channel, a = input_tensor.size()
        if weights is not None:
            weights = torch.mean(weights, dim=0)
            weights = weights.view(1, channel, 1)
            # conv1d, not conv2d: both the input and the reshaped weights
            # are 3-D here, matching nn.Conv1d in the else branch
            out = F.conv1d(input_tensor, weights)
        else:
            out = self.conv(input_tensor)
        squeeze_tensor = self.sigmoid(out)
        return input_tensor * squeeze_tensor.view(batch_size, 1, a)


class ChannelSpatialSELayer1d(nn.Module):

    def __init__(self, num_channels, reduction_ratio=2):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSpatialSELayer1d, self).__init__()
        self.cSE = ChannelSELayer1d(num_channels, reduction_ratio)
        self.sSE = SpatialSELayer1d(num_channels)

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, W)
        :return: output_tensor
        """
        return torch.max(self.cSE(input_tensor), self.sSE(input_tensor))


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_maximum_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex // 4
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tmp5 = tl.sigmoid(tmp4)
    tmp6 = tmp0 * tmp5
    tmp7 = triton_helpers.maximum(tmp3, tmp6)
    tl.store(out_ptr0 + x3, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (2, 4), (4, 1))
    assert_size_stride(primals_3, (2,), (1,))
    assert_size_stride(primals_4, (4, 2), (2, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (1, 4, 1), (4, 1, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 2), (1,
            4), 0), out=buf1)
        del primals_2
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(8)](buf2, primals_3, 8, XBLOCK=8,
            num_warps=1, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
            (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3)
        del primals_5
        buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_2[grid(16)](buf5, primals_7, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_maximum_mul_sigmoid_3[grid(64)](primals_1, buf3,
            buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf6, primals_1, primals_6, buf0, buf2, buf3, buf5, primals_4


class ChannelSELayer1d(nn.Module):

    def __init__(self, num_channels, reduction_ratio=4):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSELayer1d, self).__init__()
        num_channels_reduced = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
        self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
        self.activ_1 = nn.ReLU()
        self.activ_2 = nn.Sigmoid()

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H)
        :return: output tensor
        """
        batch_size, num_channels, _H = input_tensor.size()
        squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(
            dim=2)
        fc_out_1 = self.activ_1(self.fc1(squeeze_tensor))
        fc_out_2 = self.activ_2(self.fc2(fc_out_1))
        return input_tensor * fc_out_2.view(batch_size, num_channels, 1)


class SpatialSELayer1d(nn.Module):

    def __init__(self, num_channels):
        """
        :param num_channels: No of input channels
        """
        super(SpatialSELayer1d, self).__init__()
        self.conv = nn.Conv1d(num_channels, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_tensor, weights=None):
        """
        :param weights: weights for few shot learning
        :param input_tensor: X, shape = (batch_size, num_channels, W)
        :return: output_tensor
        """
        batch_size, channel, a = input_tensor.size()
        if weights is not None:
            weights = torch.mean(weights, dim=0)
            weights = weights.view(1, channel, 1)
            # conv1d, not conv2d: both the input and the reshaped weights
            # are 3-D here, matching nn.Conv1d in the else branch
            out = F.conv1d(input_tensor, weights)
        else:
            out = self.conv(input_tensor)
        squeeze_tensor = self.sigmoid(out)
        return input_tensor * squeeze_tensor.view(batch_size, 1, a)


class ChannelSpatialSELayer1dNew(nn.Module):

    def __init__(self, num_channels, reduction_ratio=2):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSpatialSELayer1dNew, self).__init__()
        self.cSE = ChannelSELayer1d(num_channels, reduction_ratio)
        self.sSE = SpatialSELayer1d(num_channels)

    def forward(self, input_0):
        primals_2 = self.cSE.fc1.weight
        primals_3 = self.cSE.fc1.bias
        primals_4 = self.cSE.fc2.weight
        primals_5 = self.cSE.fc2.bias
        primals_6 = self.sSE.conv.weight
        primals_7 = self.sSE.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
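A brief sketch, assuming a CUDA device, of the compiled wrapper above; its forward simply gathers the eager submodules' parameters (cSE.fc1/fc2 and sSE.conv) into the primals list that call() expects.

layer = ChannelSpatialSELayer1dNew(num_channels=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
out = layer(x)  # elementwise max of channel-SE and spatial-SE recalibration
print(out.shape)  # torch.Size([4, 4, 4])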
ioanvl/1d_squeeze_excitation
ChannelSpatialSELayer1d
false
10258
[ "MIT" ]
0
f422dc4b8e7de6239a6fb7d1688048db5053e733
https://github.com/ioanvl/1d_squeeze_excitation/tree/f422dc4b8e7de6239a6fb7d1688048db5053e733
GaussianPolicyFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3m/c3mzp3ti73rkr2ycpo5n5vkpamkp63miruoys5loyr5qw563wde6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zv/czv5s7vhnnk57wjpu7nfknqnqvxuibay5jnopwryaahzkyiwjune.py # Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus] # Source node to ATen node mapping: # softplus => exp, gt_2, log1p, where_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_7,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_7, 20), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %view_7, %log1p), kwargs = {}) triton_poi_fused_softplus_2 = async_compile.triton('triton_poi_fused_softplus_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200, ), (1, )) assert_size_stride(primals_6, (4, 200), (200, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 200), (200, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 12800, grid=grid(12800), stream=stream0) del primals_2 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] 
triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 12800, grid=grid(12800), stream=stream0) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf7, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_8, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf8) del primals_9 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus] triton_poi_fused_softplus_2.run(buf8, buf9, 256, grid=grid(256), stream=stream0) return (buf7, buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), buf7, buf8, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((200, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class GaussianPolicyFunction(nn.Module):
    """fully connected 200x200 hidden layers"""

    def __init__(self, state_dim, action_dim):
        super(GaussianPolicyFunction, self).__init__()
        self.fc1 = nn.Linear(state_dim, 200)
        self.fc2 = nn.Linear(200, 200)
        self.mu_out = nn.Linear(200, action_dim)
        self.sigma_out = nn.Linear(200, action_dim)

    def forward(self, x):
        """return: tanh-squashed action mean in [-1, 1] and positive std (softplus)"""
        x = F.leaky_relu(self.fc1(x), 0.2)
        x = F.leaky_relu(self.fc2(x), 0.2)
        return torch.tanh(self.mu_out(x)), F.softplus(self.sigma_out(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4}]
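A short sketch of how the `get_init_inputs`/`get_inputs` convention is consumed (my reading of the harness contract, not part of the original repo): the positional args and kwargs construct the module, and `get_inputs` supplies the forward batch. This runs on CPU as-is:

args, kwargs = get_init_inputs()
policy = GaussianPolicyFunction(*args, **kwargs)
mu, sigma = policy(*get_inputs())
assert mu.shape == sigma.shape == (4, 4, 4, 4)
assert mu.abs().max() <= 1.0   # tanh-squashed mean
assert (sigma > 0).all()       # softplus keeps the std positive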
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (4, 200), (200, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 200), (200, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1, buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4, buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), 
(4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_tanh_1[grid(256)](buf7, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_8, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf8) del primals_9 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_softplus_2[grid(256)](buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf7, buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0 ), buf7, buf8, primals_8, primals_6, primals_4 class GaussianPolicyFunctionNew(nn.Module): """fully connected 200x200 hidden layers""" def __init__(self, state_dim, action_dim): super(GaussianPolicyFunctionNew, self).__init__() self.fc1 = nn.Linear(state_dim, 200) self.fc2 = nn.Linear(200, 200) self.mu_out = nn.Linear(200, action_dim) self.sigma_out = nn.Linear(200, action_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.mu_out.weight primals_7 = self.mu_out.bias primals_8 = self.sigma_out.weight primals_9 = self.sigma_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
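To sanity-check the Inductor output against the eager module, one would share the weights and compare outputs on CUDA; a hedged sketch (tolerances are my choice, not from the source):

import torch

eager = GaussianPolicyFunction(4, 4).cuda()
fast = GaussianPolicyFunctionNew(4, 4).cuda()
fast.load_state_dict(eager.state_dict())  # identical parameter names in both

x = torch.rand(4, 4, 4, 4, device='cuda')
mu_e, sig_e = eager(x)
mu_c, sig_c = fast(x)
torch.testing.assert_close(mu_c, mu_e, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(sig_c, sig_e, rtol=1e-4, atol=1e-4)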
himanshusahni/task-biased-url
GaussianPolicyFunction
false
10259
[ "MIT" ]
0
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
SkillDiscriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3m/c3mzp3ti73rkr2ycpo5n5vkpamkp63miruoys5loyr5qw563wde6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oo/coo5rivaroinv27r7to5gs4jb7ce7itar6epfsastoa2ig6tj65k.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = 
call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200, ), (1, )) assert_size_stride(primals_6, (4, 200), (200, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 12800, grid=grid(12800), stream=stream0) del primals_2 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 12800, grid=grid(12800), stream=stream0) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf6) del primals_7 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), buf8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((200, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class SkillDiscriminator(nn.Module):
    """fully connected 200x200 layers for inferring q(z|s)"""

    def __init__(self, state_dim, nb_skills):
        super(SkillDiscriminator, self).__init__()
        self.fc1 = nn.Linear(state_dim, 200)
        self.fc2 = nn.Linear(200, 200)
        self.out = nn.Linear(200, nb_skills)

    def forward(self, x):
        """return: skill logits and their log-probabilities"""
        x = F.leaky_relu(self.fc1(x), 0.2)
        x = F.leaky_relu(self.fc2(x), 0.2)
        logits = self.out(x)
        return logits, nn.LogSoftmax(dim=1)(logits)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'nb_skills': 4}]
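The log-probabilities exponentiate to a proper distribution over dim 1 of the 4-D input; a quick CPU check using the same harness convention (my sketch, not part of the repo):

import torch

args, kwargs = get_init_inputs()
disc = SkillDiscriminator(*args, **kwargs)
logits, log_probs = disc(*get_inputs())
probs = log_probs.exp()
# LogSoftmax(dim=1) normalizes across the second axis of the (4,4,4,4) input.
assert torch.allclose(probs.sum(dim=1), torch.ones(4, 4, 4), atol=1e-5)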
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (4, 200), (200, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1, buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4, buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf6) del primals_7 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf6, buf7, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(256)](buf7, buf8, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf7 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0 ), buf8, primals_6, primals_4 class SkillDiscriminatorNew(nn.Module): """fully connected 200x200 layers for inferring q(z|s)""" def __init__(self, state_dim, nb_skills): super(SkillDiscriminatorNew, self).__init__() self.fc1 = nn.Linear(state_dim, 200) self.fc2 = nn.Linear(200, 200) self.out = nn.Linear(200, nb_skills) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = 
self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
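The two `_log_softmax` kernels above implement the standard numerically stable split: kernel 1 subtracts the per-row max over dim 1, kernel 2 subtracts the log-sum-exp of the shifted values. A plain-PyTorch restatement of that decomposition (assumes a CUDA device only for parity with the kernels; the math is device-independent):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4, device='cuda')
shifted = x - x.amax(dim=1, keepdim=True)                      # kernel 1
manual = shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 2
torch.testing.assert_close(manual, F.log_softmax(x, dim=1))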
himanshusahni/task-biased-url
SkillDiscriminator
false
10260
[ "MIT" ]
0
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
OutConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/eb/cebivcwv62fevyvsdcdlinto77y4sx4nbcfpkpzffguhvem2ic7n.py # Topologically Sorted Source Nodes: [conv2d, tanh], Original ATen: [aten.convolution, aten.tanh] # Source node to ATen node mapping: # conv2d => convolution # tanh => tanh # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_tanh_0 = async_compile.triton('triton_poi_fused_convolution_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, tanh], Original ATen: [aten.convolution, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_convolution_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class OutConv(nn.Module):

    def __init__(self, inChannels, outChannels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(inChannels, outChannels, kernel_size=1)
        self.tanh = nn.Tanh()

    def forward(self, input_):
        return self.tanh(self.conv(input_))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inChannels': 4, 'outChannels': 4}]
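A 1x1 convolution leaves the spatial dimensions untouched and the tanh bounds the output; a minimal CPU sketch against the harness convention (my example, not from the repo):

args, kwargs = get_init_inputs()
head = OutConv(*args, **kwargs)
out = head(*get_inputs())
assert out.shape == (4, 4, 4, 4)  # kernel_size=1 keeps H and W
assert out.abs().max() <= 1.0     # tanh maps into (-1, 1)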
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf1 class OutConvNew(nn.Module): def __init__(self, inChannels, outChannels): super(OutConvNew, self).__init__() self.conv = nn.Conv2d(inChannels, outChannels, kernel_size=1) self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
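Note that the fused kernel only adds the per-channel bias and applies tanh in place; the convolution itself runs as an extern kernel with `bias=None`. A plain-PyTorch restatement of that split (my decomposition, assuming a CUDA device):

import torch
import torch.nn.functional as F

w = torch.randn(4, 4, 1, 1, device='cuda')
b = torch.randn(4, device='cuda')
x = torch.rand(4, 4, 4, 4, device='cuda')
# extern conv without bias, then the pointwise epilogue the kernel fuses:
y = torch.tanh(F.conv2d(x, w, bias=None) + b.view(1, -1, 1, 1))
torch.testing.assert_close(y, torch.tanh(F.conv2d(x, w, bias=b)))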
iabd/Dereverbify
OutConv
false
10261
[ "MIT" ]
0
e0c2e40c6813cf5528c3e0a1d697085444fb23b2
https://github.com/iabd/Dereverbify/tree/e0c2e40c6813cf5528c3e0a1d697085444fb23b2
DiscretePolicyFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3m/c3mzp3ti73rkr2ycpo5n5vkpamkp63miruoys5loyr5qw563wde6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/j4/cj4miacghwuwo6tmp3hylr7yjqyun32g4pisr65oc2dtlcxfwv2f.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uy/cuylqrd7ye33ogvvpsnxb3skali4boxth4tryw5hn4czjzyh4a34.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200, ), (1, )) assert_size_stride(primals_6, (4, 200), (200, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 12800, grid=grid(12800), 
stream=stream0) del primals_2 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 12800, grid=grid(12800), stream=stream0) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf6) del primals_7 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), buf8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((200, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class DiscretePolicyFunction(nn.Module):
    """fully connected 200x200 hidden layers"""

    def __init__(self, state_dim, action_dim):
        super(DiscretePolicyFunction, self).__init__()
        self.fc1 = nn.Linear(state_dim, 200)
        self.fc2 = nn.Linear(200, 200)
        self.out = nn.Linear(200, action_dim)

    def forward(self, x):
        """return: action probabilities (softmax over dim 0)"""
        x = F.leaky_relu(self.fc1(x), 0.2)
        x = F.leaky_relu(self.fc2(x), 0.2)
        return F.softmax(self.out(x), 0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4}]
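`F.softmax(..., 0)` normalizes over the leading axis rather than the action axis (the generated `_softmax` kernels load at strides of 64, i.e. along dim 0); whether that is intended is a question for the original repo, but the behavior is easy to confirm on CPU:

import torch

args, kwargs = get_init_inputs()
policy = DiscretePolicyFunction(*args, **kwargs)
probs = policy(*get_inputs())
# Sums to 1 across dim 0 (the leading 4-way axis), not across actions.
assert torch.allclose(probs.sum(dim=0), torch.ones(4, 4, 4), atol=1e-5)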
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 200), (200, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (4, 200), (200, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1, buf2, 12800, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_2 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4, buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf6) del primals_7 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0 ), buf8, primals_6, primals_4 class DiscretePolicyFunctionNew(nn.Module): """fully connected 200x200 hidden layers""" def __init__(self, state_dim, action_dim): super(DiscretePolicyFunctionNew, self).__init__() self.fc1 = nn.Linear(state_dim, 200) self.fc2 = nn.Linear(200, 200) self.out = nn.Linear(200, action_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
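As with the other records, the compiled wrapper should reproduce the eager forward once the weights are shared; a hedged sketch assuming a CUDA device (tolerances are my choice):

import torch

eager = DiscretePolicyFunction(4, 4).cuda()
fast = DiscretePolicyFunctionNew(4, 4).cuda()
fast.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fast(x), eager(x), rtol=1e-4, atol=1e-4)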
himanshusahni/task-biased-url
DiscretePolicyFunction
false
10263
[ "MIT" ]
0
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
AttentionPool2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), 
xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ct/cctgbe64jgxq3sxjjjqccvq653sunfecfcizp3jcofnl7uiib7wo.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # x_1 => cat # x_2 => add # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %view], -1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {}) triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 17 x3 = (xindex // 17) x4 = xindex % 68 x5 = xindex tmp15 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 17, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x5), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/rq/crqhbpxwwpvdowuqzuzvjdahxv45tx2y4dpxda2rurvr5kralgbn.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 0.7071067811865475), kwargs = {}) triton_poi_fused_mul_2 
= async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 68) x3 = xindex % 68 x1 = (xindex // 17) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (204*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cr/ccrjqh2olqyevqox4t3kpoubo2s2m44cuvtxmf2k3slvdcgmmlcm.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865475), kwargs = {}) triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 68) x3 = xindex % 68 x1 = (xindex // 17) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (68 + x3 + (204*x2)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6p/c6p34hneq7lp7a3tjiwk44lxqe2hzbpdjgbgmanamv35xvznwb2j.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_6, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_6, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_4 = async_compile.triton('triton_per_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 68 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 
(17*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (17*x0)), tmp11, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wf/cwf36kt6t5p6sv4fjknukcyy4vz6ejfamnreogfggthvquklzkf6.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_3, %primals_4, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17) % 12 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oe/coet3g5rxo652nti4d4ogiwpej2mrpgh4cyidpv6aivegc4mda7s.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_11, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (68*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (17*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uk/cuk32wvqx6lzak6biu2zut26pzpqa3ell4xsxs3qv3cpuuon7kkw.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_11, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 17), (17, 1)) assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (12, ), (1, )) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0) del buf0 del primals_1 del primals_2 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 12, 17), (204, 17, 1)) buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf2, primals_4, buf3, 272, grid=grid(272), stream=stream0) buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf2, primals_4, buf4, 272, grid=grid(272), stream=stream0) buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_4.run(buf5, buf8, 68, 17, grid=grid(68), stream=stream0) del buf5 buf9 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf9, primals_4, 816, grid=grid(816), stream=stream0) del primals_4 buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a], Original ATen: [aten.bmm] extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204, 1, 17), 136), out=buf10) buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] 
triton_poi_fused_convolution_6.run(buf10, buf11, 16, 17, grid=grid(16, 17), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 17), (68, 17, 1)) del buf11 buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf13, primals_6, 272, grid=grid(272), stream=stream0) del primals_6 return (reinterpret_tensor(buf13, (4, 4), (68, 17), 0), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204, 17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 17), (17, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import numpy as np import torch.nn as nn import torch as th def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) matmul_ops = 2 * b * num_spatial ** 2 * c model.total_ops += th.DoubleTensor([matmul_ops]) def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f'unsupported dimensions: {dims}') class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self. n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length)) weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads_channels: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) x = x + self.positional_embedding[None, :, :] x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads_channels': 4}]
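A quick way to see the shapes at work in the reference module above is a minimal smoke test. This sketch is illustrative only (the sizes come from get_inputs/get_init_inputs; it is not part of the dataset record):

import torch

# Minimal sketch, assuming the AttentionPool2d class defined above is in scope.
# spacial_dim=4 gives 4*4 = 16 spatial tokens plus one prepended mean token,
# and embed_dim == num_heads_channels yields a single attention head.
pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4)
x = torch.rand(4, 4, 4, 4)      # (batch, channels, H, W)
out = pool(x)
assert out.shape == (4, 4)      # the pooled token x[:, :, 0], one per batch row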
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn as nn import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 17 x3 = xindex // 17 x4 = xindex % 68 x5 = xindex tmp15 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 17, tl.int64) tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x5, tmp16, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 68 x3 = xindex % 68 x1 = xindex // 17 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 204 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 68 x3 = xindex % 68 x1 = xindex // 17 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (68 + x3 + 204 * x2), xmask) tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 68 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 17 * x0), tmp11, rmask & xmask) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 17 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 68 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 17 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 17 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 17), (17, 1)) assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (12,), (1,)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2, buf1, 272, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,), 
padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 12, 17), (204, 17, 1)) buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_mul_2[grid(272)](buf2, primals_4, buf3, 272, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_mul_3[grid(272)](buf2, primals_4, buf4, 272, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) triton_per_fused__softmax_4[grid(68)](buf5, buf8, 68, 17, XBLOCK=1, num_warps=2, num_stages=1) del buf5 buf9 = buf2 del buf2 triton_poi_fused_convolution_5[grid(816)](buf9, primals_4, 816, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204, 1, 17), 136), out=buf10) buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_convolution_6[grid(16, 17)](buf10, buf11, 16, 17, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 17), (68, 17, 1)) del buf11 buf13 = buf12 del buf12 triton_poi_fused_convolution_7[grid(272)](buf13, primals_6, 272, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return reinterpret_tensor(buf13, (4, 4), (68, 17), 0 ), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204, 17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0 ) def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) matmul_ops = 2 * b * num_spatial ** 2 * c model.total_ops += th.DoubleTensor([matmul_ops]) def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f'unsupported dimensions: {dims}') class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self. 
n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length)) weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class AttentionPool2dNew(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads_channels: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, input_0): primals_2 = self.positional_embedding primals_3 = self.qkv_proj.weight primals_4 = self.qkv_proj.bias primals_5 = self.c_proj.weight primals_6 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
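The generated AttentionPool2dNew routes the same parameters through call() instead of eager ops, so a natural sanity check is to compare it against the reference module. A minimal sketch, assuming both classes above are in scope and a CUDA device is available (tolerances are illustrative; call() guards on the exact (4, 4, 4, 4) input shape):

import torch

torch.manual_seed(0)
eager = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4).cuda()
fused = AttentionPool2dNew(spacial_dim=4, embed_dim=4, num_heads_channels=4).cuda()
fused.load_state_dict(eager.state_dict())   # use identical parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)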
jasperhu13/deit
AttentionPool2d
false
10265
[ "Apache-2.0" ]
0
97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
https://github.com/jasperhu13/deit/tree/97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
PatchEmbed
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/jg/cjgafsignr6eltwpgfdtyyamm7z2oofx6jlesakdp45oail3wyp7.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 4, 4], [1, 7, 7], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[67108864], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51904512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16896) % 768 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (768, 3, 1, 16, 16), (768, 256, 256, 16, 1)) assert_size_stride(primals_2, (768, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 4, 4), padding=(1, 7, 7), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 768, 66, 16, 16), (12976128, 16896, 256, 16, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 51904512, grid=grid(51904512), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 16896, 768), (12976128, 1, 16896), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((768, 3, 1, 16, 16), (768, 256, 256, 16, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PatchEmbed(nn.Module): """ PatchEmbed. """ def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1, 4, 4), padding=(1, 7, 7), conv_2d=False): super().__init__() if conv_2d: conv = nn.Conv2d else: conv = nn.Conv3d self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding) def forward(self, x): x = self.proj(x) return x.flatten(2).transpose(1, 2) def get_inputs(): return [torch.rand([4, 3, 64, 64, 64])] def get_init_inputs(): return [[], {}]
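The token count in the compiled record (16896) follows directly from the Conv3d output-size arithmetic. A minimal sketch, assuming the PatchEmbed class above is in scope:

import torch

# With kernel (1, 16, 16), stride (1, 4, 4), padding (1, 7, 7):
#   T' = (64 + 2*1 - 1)//1 + 1 = 66,  H' = W' = (64 + 2*7 - 16)//4 + 1 = 16
pe = PatchEmbed()
tokens = pe(torch.rand(4, 3, 64, 64, 64))
assert tokens.shape == (4, 66 * 16 * 16, 768)   # (batch, 16896 tokens, dim)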
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16896 % 768 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (768, 3, 1, 16, 16), (768, 256, 256, 16, 1)) assert_size_stride(primals_2, (768,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 4, 4), padding=(1, 7, 7), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 768, 66, 16, 16), (12976128, 16896, 256, 16, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(51904512)](buf1, primals_2, 51904512, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 16896, 768), (12976128, 1, 16896), 0 ), primals_1, primals_3 class PatchEmbedNew(nn.Module): """ PatchEmbed. """ def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1, 4, 4), padding=(1, 7, 7), conv_2d=False): super().__init__() if conv_2d: conv = nn.Conv2d else: conv = nn.Conv3d self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding) def forward(self, input_0): primals_1 = self.proj.weight primals_2 = self.proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
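As with the other records, the compiled PatchEmbedNew can be sanity-checked against the eager module. A sketch under the assumption that both classes above are in scope and CUDA is available (call() guards on this exact input shape and layout):

import torch

eager = PatchEmbed().cuda()
fused = PatchEmbedNew().cuda()
fused.load_state_dict(eager.state_dict())   # identical conv weight and bias
x = torch.rand(4, 3, 64, 64, 64, device='cuda')
torch.testing.assert_close(fused(x), eager(x))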
jasperhu13/deit
PatchEmbed
false
10266
[ "Apache-2.0" ]
0
97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
https://github.com/jasperhu13/deit/tree/97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
SiglogModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cu/ccuwsratos2vcijifnnadjntl37h2fz4na3ke67h5sya2t7pfkrg.py # Topologically Sorted Source Nodes: [sign, abs_1, add, log, mul], Original ATen: [aten.sign, aten.abs, aten.add, aten.log, aten.mul] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # log => log # mul => mul # sign => sign # Graph fragment: # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sign, %log), kwargs = {}) triton_poi_fused_abs_add_log_mul_sign_0 = async_compile.triton('triton_poi_fused_abs_add_log_mul_sign_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_log_mul_sign_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_add_log_mul_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp1 < tmp0 tmp3 = tmp2.to(tl.int8) tmp4 = tmp0 < tmp1 tmp5 = tmp4.to(tl.int8) tmp6 = tmp3 - tmp5 tmp7 = tmp6.to(tmp0.dtype) tmp8 = tl_math.abs(tmp0) tmp9 = 1.0 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp7 * tmp11 tl.store(out_ptr0 + (x0), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sign, abs_1, add, log, mul], Original ATen: [aten.sign, aten.abs, aten.add, aten.log, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_abs_add_log_mul_sign_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def siglog(v): return v.sign() * torch.log(1 + v.abs()) class SiglogModule(nn.Module): def forward(self, v): return siglog(v) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
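siglog is odd and monotone: roughly linear near zero and logarithmic in the tails, which compresses large magnitudes without losing sign. A few illustrative values (assuming the siglog function above is in scope):

import torch

v = torch.tensor([-10.0, -1.0, 0.0, 1.0, 10.0])
print(siglog(v))
# ~ tensor([-2.3979, -0.6931, 0.0000, 0.6931, 2.3979]), i.e. sign(v) * log(1 + |v|)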
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_log_mul_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp1 < tmp0 tmp3 = tmp2.to(tl.int8) tmp4 = tmp0 < tmp1 tmp5 = tmp4.to(tl.int8) tmp6 = tmp3 - tmp5 tmp7 = tmp6.to(tmp0.dtype) tmp8 = tl_math.abs(tmp0) tmp9 = 1.0 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp7 * tmp11 tl.store(out_ptr0 + x0, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_log_mul_sign_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def siglog(v): return v.sign() * torch.log(1 + v.abs()) class SiglogModuleNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
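In the fused kernel above, sign() is computed from two int8 comparisons (tmp3 - tmp5 is +1, 0, or -1) before the log(1 + |v|) factor. A minimal agreement check against the eager path, assuming CUDA and the definitions above (the input includes negatives so the sign logic is exercised):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda') * 2 - 1
torch.testing.assert_close(SiglogModuleNew()(x), siglog(x))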
finalgruntgit/diautils
SiglogModule
false
10267
[ "MIT" ]
0
b9d7666ed5023700db01a4295430c52721acfc25
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
MeanModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/vz/cvzdeyzbjmguyc7weo3g2iu6knqdlesduaneomlvq4mxjrspo75o.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, []), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 
tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MeanModule(nn.Module): def __init__(self, *axis, keepdim=False): super().__init__() self.axis = axis self.keepdim = keepdim def forward(self, v): mean = v.mean(self.axis) if self.keepdim: dims = list(v.shape) if isinstance(self.axis, list) or isinstance(self.axis, tuple): for ax in self.axis: dims[ax] = 1 else: dims[self.axis] = 1 mean = mean.view(dims) return mean def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
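A minimal usage sketch for the reference module above (an editorial addition, not part of the original record; the axis choice is illustrative and assumes MeanModule from the snippet above is in scope):

import torch

# Reduce over dim 1 and restore it as a singleton via the keepdim reshaping path.
m = MeanModule(1, keepdim=True)
x = torch.rand(4, 4, 4, 4)
out = m(x)
assert out.shape == (4, 1, 4, 4)
assert torch.allclose(out, x.mean(1, keepdim=True))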
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class MeanModuleNew(nn.Module): def __init__(self, *axis, keepdim=False): super().__init__() self.axis = axis self.keepdim = keepdim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
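A hypothetical parity check between the eager reduction and the compiled variant above (editorial sketch, not part of the record); it assumes a CUDA device because call() pins its buffers to cuda:0, and the same harness applies to the other eager/compiled pairs in this shard:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')  # shape/strides fixed by assert_size_stride
    eager = x.mean()                    # full reduction to a scalar
    compiled = MeanModuleNew()(x)       # runs triton_per_fused_mean_0 via call()
    assert torch.allclose(eager, compiled, atol=1e-6)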
finalgruntgit/diautils
MeanModule
false
10268
[ "MIT" ]
0
b9d7666ed5023700db01a4295430c52721acfc25
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fh/cfhw6jsqnchbmvzjnufqyx6wlrnerowzjkbvgzlap66bd37waupt.py # Topologically Sorted Source Nodes: [eij_1, eij_2, a, sum_1], Original ATen: [aten.add, aten.tanh, aten.exp, aten.sum] # Source node to ATen node mapping: # a => exp # eij_1 => add # eij_2 => tanh # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%tanh,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused_add_exp_sum_tanh_0 = async_compile.triton('triton_poi_fused_add_exp_sum_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_sum_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_sum_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3)) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tmp5 = tl_math.exp(tmp4) tmp9 = tmp6 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = tl_math.exp(tmp10) tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tl_math.exp(tmp17) tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp24 = libdevice.tanh(tmp23) tmp25 = tl_math.exp(tmp24) tmp26 = tmp19 + tmp25 tl.store(out_ptr0 + (x0), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ol/colig22fqpovbdb44bjwgie6kivqc5vjoux3acv7lfeqg4djbl3b.py # Topologically Sorted Source Nodes: [weighted_input, sum_2], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # sum_2 => sum_2 # weighted_input => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 
= xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (1)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp23 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp24 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (2)) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp34 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp35 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (3)) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = 1e-10 tmp10 = tmp8 + tmp9 tmp11 = tmp0 * tmp10 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tl_math.exp(tmp17) tmp19 = tmp18 / tmp7 tmp20 = tmp19 + tmp9 tmp21 = tmp12 * tmp20 tmp22 = tmp11 + tmp21 tmp27 = tmp24 + tmp26 tmp28 = libdevice.tanh(tmp27) tmp29 = tl_math.exp(tmp28) tmp30 = tmp29 / tmp7 tmp31 = tmp30 + tmp9 tmp32 = tmp23 * tmp31 tmp33 = tmp22 + tmp32 tmp38 = tmp35 + tmp37 tmp39 = libdevice.tanh(tmp38) tmp40 = tl_math.exp(tmp39) tmp41 = tmp40 / tmp7 tmp42 = tmp41 + tmp9 tmp43 = tmp34 * tmp42 tmp44 = tmp33 + tmp43 tl.store(out_ptr0 + (x2), tmp44, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 1), (1, 16), torch.float32) # Topologically Sorted Source Nodes: [eij_1, eij_2, a, sum_1], Original ATen: [aten.add, aten.tanh, aten.exp, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_add_exp_sum_tanh_0.run(buf0, primals_3, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weighted_input, sum_2], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_1.run(primals_1, buf0, primals_3, buf1, buf2, 64, grid=grid(64), stream=stream0) del buf1 return (buf2, primals_1, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Attention(nn.Module): def __init__(self, feature_dim, K, bias=True, **kwargs): super(Attention, self).__init__(**kwargs) self.supports_masking = True self.bias = bias self.feature_dim = feature_dim self.K = K weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight) if bias: self.b = nn.Parameter(torch.zeros(K)) def forward(self, x): B, N, K, feature_dim = x.shape eij = torch.mm(x.contiguous().view(-1, feature_dim), self.weight).view( -1, K) if self.bias: eij = eij + self.b eij = torch.tanh(eij) a = torch.exp(eij) a = a / torch.sum(a, 1, keepdim=True) + 1e-10 weighted_input = x * a.view(B, N, K, 1) return torch.sum(weighted_input, 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4, 'K': 4}]
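An illustrative forward pass through the eager Attention module (editorial addition; the 4s mirror get_inputs()/get_init_inputs()):

import torch

attn = Attention(feature_dim=4, K=4)
x = torch.rand(4, 4, 4, 4)       # (B, N, K, feature_dim)
out = attn(x)
assert out.shape == (4, 4, 4)    # the K axis is collapsed by the weighted sum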
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_exp_sum_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tmp5 = tl_math.exp(tmp4) tmp9 = tmp6 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = tl_math.exp(tmp10) tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tl_math.exp(tmp17) tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp24 = libdevice.tanh(tmp23) tmp25 = tl_math.exp(tmp24) tmp26 = tmp19 + tmp25 tl.store(out_ptr0 + x0, tmp26, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr2 + 1) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp23 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp24 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr2 + 2) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp34 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr2 + 3) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = 1e-10 tmp10 = tmp8 + tmp9 tmp11 = tmp0 * tmp10 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tl_math.exp(tmp17) tmp19 = tmp18 / tmp7 tmp20 = tmp19 + tmp9 tmp21 = tmp12 * tmp20 tmp22 = tmp11 + tmp21 tmp27 = tmp24 + tmp26 tmp28 = libdevice.tanh(tmp27) tmp29 = tl_math.exp(tmp28) tmp30 = tmp29 / tmp7 tmp31 = tmp30 + tmp9 tmp32 = tmp23 * tmp31 tmp33 = tmp22 + tmp32 tmp38 = tmp35 + tmp37 tmp39 = libdevice.tanh(tmp38) tmp40 = tl_math.exp(tmp39) tmp41 = tmp40 / tmp7 tmp42 = tmp41 + tmp9 tmp43 = tmp34 * tmp42 tmp44 = tmp33 + tmp43 tl.store(out_ptr0 + x2, 
tmp44, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 1), (1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_sum_tanh_0[grid(16)](buf0, primals_3, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(64)](primals_1, buf0, primals_3, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf2, primals_1, primals_3, buf0 class AttentionNew(nn.Module): def __init__(self, feature_dim, K, bias=True, **kwargs): super(AttentionNew, self).__init__(**kwargs) self.supports_masking = True self.bias = bias self.feature_dim = feature_dim self.K = K weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight) if bias: self.b = nn.Parameter(torch.zeros(K)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
heyitsmine/FewRel
Attention
false
10269
[ "MIT" ]
0
2a2b8ae471298d9eb3557796a085c23b21982fb2
https://github.com/heyitsmine/FewRel/tree/2a2b8ae471298d9eb3557796a085c23b21982fb2
SumModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nc/cnc3hun37uugdoohqi3fxky2dy24evetrjus72n5qbjgvwiqu2cc.py # Topologically Sorted Source Nodes: [sum_2], Original ATen: [aten.sum] # Source node to ATen node mapping: # sum_2 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, []), kwargs = {}) triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 
tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sum_2], Original ATen: [aten.sum] stream0 = get_raw_stream(0) triton_per_fused_sum_0.run(arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SumModule(nn.Module): def __init__(self, *axis, keepdim=False): super().__init__() self.axis = axis self.keepdim = keepdim def forward(self, v): sum = v.sum(self.axis) if self.keepdim: dims = list(v.shape) if isinstance(self.axis, list) or isinstance(self.axis, tuple): for ax in self.axis: dims[ax] = 1 else: dims[self.axis] = 1 sum = sum.view(dims) return sum def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
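A short usage sketch for the reference module above (editorial addition; the axes are chosen for illustration):

import torch

# Sum over the last two axes and restore them as singletons through keepdim.
s = SumModule(2, 3, keepdim=True)
x = torch.rand(4, 4, 4, 4)
out = s(x)
assert out.shape == (4, 4, 1, 1)
assert torch.allclose(out, x.sum(dim=(2, 3), keepdim=True))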
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf0, class SumModuleNew(nn.Module): def __init__(self, *axis, keepdim=False): super().__init__() self.axis = axis self.keepdim = keepdim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
finalgruntgit/diautils
SumModule
false
10271
[ "MIT" ]
0
b9d7666ed5023700db01a4295430c52721acfc25
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
MultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # q_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_8/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = 
{memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py # Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # attn_weights_4 => div_1 # sum_1 => sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_12, [1]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {}) triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': 
[], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4) buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0) del buf4 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: 
[aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div] triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import Parameter import torch.nn.functional as F class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.attn_dropout = attn_dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.register_parameter('in_proj_bias', None) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward(self, query, key, value, attn_mask=None): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Timesteps can be masked by supplying a T x T mask in the `attn_mask` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr() kv_same = key.data_ptr() == value.data_ptr() tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() if qkv_same: q, k, v = self.in_proj_qkv(query) elif kv_same: q = self.in_proj_q(query) if key is None: assert value is None k = v = None else: k, v = self.in_proj_kv(key) else: q = self.in_proj_q(query) k = self.in_proj_k(key) v = self.in_proj_v(value) q = q * self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) src_len = k.size(1) if self.add_zero_attn: src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: try: attn_weights += attn_mask.unsqueeze(0) except: None None assert False attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as( attn_weights) attn_weights = F.dropout(attn_weights, p=self.attn_dropout, training=self.training) attn = torch.bmm(attn_weights, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self. head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.sum(dim=1) / self.num_heads return attn, attn_weights def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query, **kwargs): return self._in_proj(query, end=self.embed_dim, **kwargs) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=0, end=None, **kwargs): weight = kwargs.get('weight', self.in_proj_weight) bias = kwargs.get('bias', self.in_proj_bias) weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return F.linear(input, weight, bias) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import Parameter import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4) buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf4 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) 
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0 ), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0) class MultiheadAttentionNew(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.attn_dropout = attn_dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.register_parameter('in_proj_bias', None) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query, **kwargs): return self._in_proj(query, end=self.embed_dim, **kwargs) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=0, end=None, **kwargs): weight = kwargs.get('weight', self.in_proj_weight) bias = kwargs.get('bias', self.in_proj_bias) weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return F.linear(input, weight, bias) def forward(self, input_0, input_1, input_2): primals_4 = self.in_proj_weight primals_5 = self.in_proj_bias primals_6 = self.out_proj.weight primals_7 = self.out_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
jiahuanluo/multi_media
MultiheadAttention
false
10272
[ "MIT" ]
0
ac5ac59dba87d0368ca656e600a85bfd9a1da28e
https://github.com/jiahuanluo/multi_media/tree/ac5ac59dba87d0368ca656e600a85bfd9a1da28e
SigsqrtModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/lf/clfueab2e77m52etqfe74c7c44ba6a2mrfnont2bg7bym7k5b67o.py
# Topologically Sorted Source Nodes: [abs_1, add, sqrt, truediv], Original ATen: [aten.abs, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   abs_1 => abs_1
#   add => add
#   sqrt => sqrt
#   truediv => div
# Graph fragment:
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {})
triton_poi_fused_abs_add_div_sqrt_0 = async_compile.triton('triton_poi_fused_abs_add_div_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_div_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl_math.abs(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = libdevice.sqrt(tmp3)
    tmp5 = tmp0 / tmp4
    tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [abs_1, add, sqrt, truediv], Original ATen: [aten.abs, aten.add, aten.sqrt, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_abs_add_div_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn

def sigsqrt(v):
    return v / torch.sqrt(1 + v.abs())

class SigsqrtModule(nn.Module):

    def forward(self, v):
        return sigsqrt(v)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_abs_add_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.abs(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = libdevice.sqrt(tmp3)
    tmp5 = tmp0 / tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_add_div_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,

def sigsqrt(v):
    return v / torch.sqrt(1 + v.abs())

class SigsqrtModuleNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
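A quick way to sanity-check an entry like this one — a minimal sketch, assuming the eager SigsqrtModule and the compiled SigsqrtModuleNew definitions above sit in one importable file and a CUDA device is available (the dataset itself ships no test harness):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = SigsqrtModule()
compiled = SigsqrtModuleNew()
# Both paths compute x / sqrt(1 + |x|) elementwise, so the results should
# agree up to float32 rounding.
assert torch.allclose(eager(x), compiled(x), atol=1e-6)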
finalgruntgit/diautils
SigsqrtModule
false
10,273
[ "MIT" ]
0
b9d7666ed5023700db01a4295430c52721acfc25
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
LearnedPositionalEncoding
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/qz/cqzfcogql2l446of55recclsw5glrq4luh7x4vkalb7ftzt4wgs4.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   add => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[33554432],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 26214400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1638400
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1, 3200, 512), (1638400, 512, 1))
    assert_size_stride(primals_2, (4, 4, 3200, 512), (6553600, 1638400, 512, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3200, 512), (6553600, 1638400, 512, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 26214400, grid=grid(26214400), stream=stream0)
        del primals_1
        del primals_2
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((1, 3200, 512), (1638400, 512, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 3200, 512), (6553600, 1638400, 512, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.optim

class LearnedPositionalEncoding(nn.Module):

    def __init__(self, max_position_embeddings, embedding_dim, seq_length):
        super(LearnedPositionalEncoding, self).__init__()
        # Note: the constructor arguments are unused; the embedding table is
        # hard-coded to shape (1, 3200, 512) in the source repository.
        self.position_embeddings = nn.Parameter(torch.zeros(1, 3200, 512))

    def forward(self, x, position_ids=None):
        position_embeddings = self.position_embeddings
        return x + position_embeddings

def get_inputs():
    return [torch.rand([4, 4, 3200, 512])]

def get_init_inputs():
    return [[], {'max_position_embeddings': 4, 'embedding_dim': 4, 'seq_length': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1638400
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x2, tmp2, None)

def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1, 3200, 512), (1638400, 512, 1))
    assert_size_stride(primals_2, (4, 4, 3200, 512), (6553600, 1638400, 512, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3200, 512), (6553600, 1638400, 512, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(26214400)](primals_2, primals_1, buf0, 26214400, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf0,

class LearnedPositionalEncodingNew(nn.Module):

    def __init__(self, max_position_embeddings, embedding_dim, seq_length):
        super(LearnedPositionalEncodingNew, self).__init__()
        self.position_embeddings = nn.Parameter(torch.zeros(1, 3200, 512))

    def forward(self, input_0):
        primals_1 = self.position_embeddings
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
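The fused add kernel flattens the (4, 4, 3200, 512) input and indexes the positional table with xindex % 1638400, where 1638400 = 3200 * 512; that modulo is exactly torch's broadcast of the (1, 3200, 512) parameter over the two leading dimensions. A CPU-side sketch of the same index arithmetic, with scaled-down stand-in shapes so it runs cheaply (illustration only, not part of the dataset):

import torch

pos = torch.randn(1, 3, 5)   # stand-in for the (1, 3200, 512) table
x = torch.randn(2, 2, 3, 5)  # stand-in for the (4, 4, 3200, 512) input
eager = x + pos              # broadcast over the two leading dims
# The kernel's addressing: out[i] = x[i] + pos[i % pos.numel()]
idx = torch.arange(x.numel()) % pos.numel()
manual = x.reshape(-1) + pos.reshape(-1)[idx]
assert torch.equal(eager.reshape(-1), manual)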
felixquinton1/TransBTS
LearnedPositionalEncoding
false
10,274
[ "Apache-2.0" ]
0
6992c902413ba15f40ebfe9f6d5d0e3594051033
https://github.com/felixquinton1/TransBTS/tree/6992c902413ba15f40ebfe9f6d5d0e3594051033
VAELoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/bc/cbcs63wgigb7unamo6vgnlmottfh4tj3mtxc7uxebftbd6mlereg.py
# Topologically Sorted Source Nodes: [BCE, add, pow_1, sub, exp, sub_1, sum_1, KLD, add_1], Original ATen: [aten.binary_cross_entropy, aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul]
# Source node to ATen node mapping:
#   BCE => full_default, full_default_1, log, log1p, maximum, maximum_1, mul, mul_1, neg, sub, sub_1, sum_1
#   KLD => mul_2
#   add => add
#   add_1 => add_1
#   exp => exp
#   pow_1 => pow_1
#   sub => sub_2
#   sub_1 => sub_3
#   sum_1 => sum_2
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %maximum_1), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg2_1, 1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg3_1, 2), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_1), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg2_1,), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %exp), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, -0.5), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_2), kwargs = {})
triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp3 = tl.load(in_ptr1 + (r0), None)
    tmp16 = tl.load(in_ptr2 + (r0), None)
    tmp18 = tl.load(in_ptr3 + (r0), None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp17 = tmp16 + tmp1
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 - tmp19
    tmp21 = tl_math.exp(tmp16)
    tmp22 = tmp20 - tmp21
    tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
    tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
    tmp26 = -0.5
    tmp27 = tmp25 * tmp26
    tmp28 = tmp15 + tmp27
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [BCE, add, pow_1, sub, exp, sub_1, sum_1, KLD, add_1], Original ATen: [aten.binary_cross_entropy, aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
    return (buf2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn

class VAELoss(nn.Module):

    def __init__(self):
        super(VAELoss, self).__init__()
        self.bce = nn.BCELoss(reduction='sum')

    def forward(self, recon_x, x, mu, logvar):
        BCE = self.bce(recon_x, x)
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return BCE + KLD

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp16 = tl.load(in_ptr2 + r0, None)
    tmp18 = tl.load(in_ptr3 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp17 = tmp16 + tmp1
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 - tmp19
    tmp21 = tl_math.exp(tmp16)
    tmp22 = tmp20 - tmp21
    tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
    tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
    tmp26 = -0.5
    tmp27 = tmp25 * tmp26
    tmp28 = tmp15 + tmp27
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)

def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
    return buf2,

class VAELossNew(nn.Module):

    def __init__(self):
        super(VAELossNew, self).__init__()
        self.bce = nn.BCELoss(reduction='sum')

    def forward(self, input_0, input_1, input_2, input_3):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        arg3_1 = input_3
        output = call([arg0_1, arg1_1, arg2_1, arg3_1])
        return output[0]
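The single persistent-reduction kernel accumulates both terms of the loss in one pass: one tl.sum produces the summed binary cross entropy (the clamp at -100.0 mirrors the log clamping that nn.BCELoss applies internally), the other produces the KL term before the final scalar add. The same arithmetic in eager torch, runnable on CPU (illustration only):

import torch
import torch.nn.functional as F

recon_x, x, mu, logvar = (torch.rand(4, 4, 4, 4) for _ in range(4))
# First reduction: binary cross entropy with reduction='sum'.
bce = F.binary_cross_entropy(recon_x, x, reduction='sum')
# Second reduction: -0.5 * sum(1 + logvar - mu^2 - exp(logvar)).
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
total = bce + kld  # the value the kernel writes to the scalar output buffer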
jlrussin/RL_project
VAELoss
false
10,275
[ "Apache-2.0" ]
0
a8562b4797afdf5944dba768a88d779056e8506a
https://github.com/jlrussin/RL_project/tree/a8562b4797afdf5944dba768a88d779056e8506a
SoftmaxModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ef/cef5jl2dffibrzdgvry2syqh3nv4y45hqkgzbp7rs7to3eijjxsa.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [4], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/wx/cwx2kruo4gzyioj66hb76yw4vgc4lxjk7wwvv5hwx3fp7vkj4o6n.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__softmax_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0)
        del buf0
    return (buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn

class SoftmaxModule(nn.Module):

    def __init__(self, axis):
        super().__init__()
        self.axis = axis

    def forward(self, v):
        return v.softmax(self.axis)

def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'axis': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)

@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
    return buf1,

class SoftmaxModuleNew(nn.Module):

    def __init__(self, axis):
        super().__init__()
        self.axis = axis

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
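The two kernels are the standard numerically stable softmax split: the first subtracts the per-row maximum before exponentiating (so exp never overflows), the second normalizes by the row sum. The same three steps in eager torch (illustration only):

import torch

v = torch.rand(4, 4, 4, 4, 4)
m = v.amax(dim=4, keepdim=True)       # kernel 0: row max (tmp7)
e = (v - m).exp()                     # kernel 0: shifted exponential (tmp9)
out = e / e.sum(dim=4, keepdim=True)  # kernel 1: normalization (tmp8)
assert torch.allclose(out, v.softmax(4))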
finalgruntgit/diautils
SoftmaxModule
false
10,276
[ "MIT" ]
0
b9d7666ed5023700db01a4295430c52721acfc25
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
MultipleRegression
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/oz/cozyt2mpwzdnrqu4l42bzvdjle53cvm5zgsariudbe3lan5me2eg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
#   x => sigmoid
# Graph fragment:
#   %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/rn/crnt33v5hoi57vamo5b42adk3tmkgywnvg2dcq3srwnjgthrnszr.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
#   x_1 => sigmoid_1
# Graph fragment:
#   %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (128, 64), (64, 1))
    assert_size_stride(primals_5, (128, ), (1, ))
    assert_size_stride(primals_6, (1, 128), (128, 1))
    assert_size_stride(primals_7, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
        stream0 = get_raw_stream(0)
        triton_poi_fused_sigmoid_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2  # reuse
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
        triton_poi_fused_sigmoid_1.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0)
        del primals_5
        buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((128, 64), (64, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn

class MultipleRegression(nn.Module):

    def __init__(self, num_features):
        super(MultipleRegression, self).__init__()
        self.fc1 = nn.Linear(num_features, 64)
        self.fc2 = nn.Linear(64, 128)
        self.output = nn.Linear(128, 1)
        self.act = nn.Sigmoid()

    def forward(self, inputs):
        x = self.act(self.fc1(inputs))
        x = self.act(self.fc2(x))
        x = self.output(x)
        return x

    def predict(self, test_inputs):
        x = self.act(self.fc1(test_inputs))
        x = self.act(self.fc2(x))
        x = self.output(x)
        return x

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'num_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)

@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (128, 64), (64, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (1, 128), (128, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
        del buf2
        triton_poi_fused_sigmoid_1[grid(8192)](buf3, primals_5, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4

class MultipleRegressionNew(nn.Module):

    def __init__(self, num_features):
        super(MultipleRegressionNew, self).__init__()
        self.fc1 = nn.Linear(num_features, 64)
        self.fc2 = nn.Linear(64, 128)
        self.output = nn.Linear(128, 1)
        self.act = nn.Sigmoid()

    def predict(self, test_inputs):
        x = self.act(self.fc1(test_inputs))
        x = self.act(self.fc2(x))
        x = self.output(x)
        return x

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.output.weight
        primals_7 = self.output.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
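Here inductor keeps each nn.Linear matmul as an extern mm/addmm call and fuses only the bias add and sigmoid into the pointwise kernels, with the 4D input viewed as a (64, 4) matrix throughout. A rough eager replay of the first layer, using stand-in weights since the real parameters live in the module (illustration only):

import torch

x = torch.rand(4, 4, 4, 4)
w1, b1 = torch.rand(64, 4), torch.rand(64)  # stand-ins for fc1.weight / fc1.bias
h = x.reshape(64, 4) @ w1.t()               # extern_kernels.mm into buf0
h = torch.sigmoid(h + b1)                   # triton_poi_fused_sigmoid_0 on buf1
h = h.reshape(4, 4, 4, 64)                  # the reinterpret_tensor view of buf1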
jiruifu-jerry0219/UpperLimbEstimator
MultipleRegression
false
10,277
[ "Apache-2.0" ]
0
d62deef93419934dcb33e43707dd0634a235fb9a
https://github.com/jiruifu-jerry0219/UpperLimbEstimator/tree/d62deef93419934dcb33e43707dd0634a235fb9a
SegmentationNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   out_1 => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   out_5 => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   out_5 => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf8, 256, grid=grid(256), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
        del buf5
    return (buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SegmentationNet(nn.Module): def __init__(self, feature, hidden1, hidden2, output): """ Initialize the network. :param feature: int, input feature size :param hidden1: int, size of the first hidden layer :param hidden2: int, size of the second hidden layer :param output: int, number of output classes """ super(SegmentationNet, self).__init__() self.layer1 = nn.Linear(feature, hidden1) self.layer2 = nn.Linear(hidden1, hidden2) self.layer3 = nn.Linear(hidden2, output) def get_weight_norm(self): """ Return ||W|| :return: float """ layer_1_w_norm = torch.norm(self.layer1.weight, 2) layer_2_w_norm = torch.norm(self.layer2.weight, 2) layer_3_w_norm = torch.norm(self.layer3.weight, 2) return layer_1_w_norm + layer_2_w_norm + layer_3_w_norm def forward(self, inputs): """ Return a forward pass given inputs. :param inputs: input tensor. :return: softmax probabilities over the output classes. """ out = inputs out = self.layer1(out) out = F.relu(out) out = self.layer2(out) out = F.relu(out) out = self.layer3(out) out = F.softmax(out, dim=1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'feature': 4, 'hidden1': 4, 'hidden2': 4, 'output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class SegmentationNetNew(nn.Module): def __init__(self, feature, hidden1, hidden2, output): """ Initialize the network. :param feature: int, input feature size :param hidden1: int, size of the first hidden layer :param hidden2: int, size of the second hidden layer :param output: int, number of output classes """ super(SegmentationNetNew, self).__init__() self.layer1 = nn.Linear(feature, hidden1) self.layer2 = nn.Linear(hidden1, hidden2) self.layer3 = nn.Linear(hidden2, output) def get_weight_norm(self): """ Return ||W|| :return: float """ layer_1_w_norm = torch.norm(self.layer1.weight, 2) layer_2_w_norm = torch.norm(self.layer2.weight, 2) layer_3_w_norm = torch.norm(self.layer3.weight, 2) return layer_1_w_norm + layer_2_w_norm + layer_3_w_norm def forward(self, input_0): primals_2 = self.layer1.weight primals_3 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_6 = self.layer3.weight primals_7 = self.layer3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jinyu-hou/medium-blog-scripts
SegmentationNet
false
10278
[ "MIT" ]
0
a645d544a4bd1c937e4ff99dca0d6e98b3abb7f9
https://github.com/jinyu-hou/medium-blog-scripts/tree/a645d544a4bd1c937e4ff99dca0d6e98b3abb7f9
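A minimal parity check for the entry above (not part of the generated artifact): SegmentationNetNew is intended as a drop-in replacement for the eager SegmentationNet, and since call() hard-codes cuda:0 the sketch assumes a CUDA device. check_parity is a hypothetical helper introduced here only for illustration.

import torch

def check_parity(eager_cls, compiled_cls, init_args, inputs, atol=1e-6):
    """Instantiate both classes with shared weights and compare outputs."""
    torch.manual_seed(0)
    eager = eager_cls(*init_args).cuda()
    compiled = compiled_cls(*init_args).cuda()
    # Copy parameters so both modules compute from identical weights.
    compiled.load_state_dict(eager.state_dict())
    with torch.no_grad():
        return torch.allclose(eager(*inputs), compiled(*inputs), atol=atol)

# Usage, with shapes taken from get_inputs()/get_init_inputs() above:
# x = torch.rand([4, 4, 4, 4], device='cuda')
# assert check_parity(SegmentationNet, SegmentationNetNew, (4, 4, 4, 4), (x,))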
LinearWithChannel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cm/ccmcgo4hhocf76otuns232vkfdobmiyhbrbzce7zxp7kc5eree6u.py # Topologically Sorted Source Nodes: [observations], Original ATen: [aten.repeat] # Source node to ATen node mapping: # observations => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_1, [4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/qt/cqty7ixgnj6ymsefadavis3iwxnitomsuz2twh2sgj6lfymhccbj.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.add] # Source node to ATen node mapping: # output => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %primals_3), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [observations], Original ATen: [aten.repeat] stream0 = get_raw_stream(0) triton_poi_fused_repeat_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [observations, bmm], Original ATen: [aten.repeat, aten.bmm] extern_kernels.bmm(buf0, primals_2, out=buf1) del primals_2 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class LinearWithChannel(nn.Module): def __init__(self, input_size, output_size, channel_size): super(LinearWithChannel, self).__init__() self.channel_size = channel_size self.weight = torch.nn.Parameter(torch.zeros(channel_size, input_size, output_size)) self.bias = torch.nn.Parameter(torch.zeros(channel_size, 1, output_size)) self.reset_parameters(self.weight, self.bias) def reset_parameters(self, weights, bias): torch.nn.init.kaiming_uniform_(weights, a=np.sqrt(3)) fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(weights) bound = 1 / np.sqrt(fan_in) torch.nn.init.uniform_(bias, -bound, bound) def forward(self, observations): """ observations = torch.tensor(batch_size, input_size) weight = torch.tensor(channel_size, input_size, output_size) bias = torch.tensor(channel_size, 1, output_size) :param observations: :return: torch.tensor(channel_size, batch_size, output_size) """ observations = observations.repeat(self.channel_size, 1, 1) output = torch.bmm(observations, self.weight) + self.bias return output def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'channel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf0, primals_2, out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0) class LinearWithChannelNew(nn.Module): def __init__(self, input_size, output_size, channel_size): super(LinearWithChannelNew, self).__init__() self.channel_size = channel_size self.weight = torch.nn.Parameter(torch.zeros(channel_size, input_size, output_size)) self.bias = torch.nn.Parameter(torch.zeros(channel_size, 1, output_size)) self.reset_parameters(self.weight, self.bias) def reset_parameters(self, weights, bias): torch.nn.init.kaiming_uniform_(weights, a=np.sqrt(3)) fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(weights) bound = 1 / np.sqrt(fan_in) torch.nn.init.uniform_(bias, -bound, bound) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jilanglois-su/cobs10-dengai
LinearWithChannel
false
10279
[ "MIT" ]
0
101d3434db6330e9794b2e266b02c93793abfb82
https://github.com/jilanglois-su/cobs10-dengai/tree/101d3434db6330e9794b2e266b02c93793abfb82
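For reference, a short sketch of the contraction that the repeat + bmm pattern in this entry evaluates: channel_size independent affine maps, which torch.einsum can express without materializing the repeated input. Shapes are the ones assumed by get_inputs()/get_init_inputs() above; illustrative only.

import torch

obs = torch.rand(4, 4)   # (batch, input_size)
w = torch.rand(4, 4, 4)  # (channel_size, input_size, output_size)
b = torch.rand(4, 1, 4)  # (channel_size, 1, output_size)

# repeat + bmm, as in LinearWithChannel.forward above
out_bmm = torch.bmm(obs.repeat(w.shape[0], 1, 1), w) + b
# the same per-channel affine maps as a single einsum contraction
out_einsum = torch.einsum('bi,cio->cbo', obs, w) + b
assert torch.allclose(out_bmm, out_einsum, atol=1e-6)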
MultiHeadedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3u/c3ui6pflkqqmwiicu3k3k6nxfn3zxrzgar4nyb7sxfkreg6ab7we.py # Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_weights => div, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2) del primals_2 buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3) buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_per_fused__softmax_0.run(buf3, buf6, 64, 16, grid=grid(64), stream=stream0) del buf3 buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attended], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), 
out=buf8) return (reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F def same_tensor(tensor, *args): """ Do the input tensors all point to the same underlying data """ for other in args: if not torch.is_tensor(other): return False if tensor.device != other.device: return False if tensor.dtype != other.dtype: return False if tensor.data_ptr() != other.data_ptr(): return False return True class MultiHeadedAttention(nn.Module): """ Implement a multi-headed attention module """ def __init__(self, embed_dim, num_heads=1): """ Initialize the attention module """ super(MultiHeadedAttention, self).__init__() assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}' self.embed_dim = embed_dim self.num_heads = num_heads self.projection_dim = embed_dim // num_heads self.scale = self.projection_dim ** -0.5 self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False) self.reset_parameters() def reset_parameters(self): """ Reset parameters using xavier initialization """ gain = nn.init.calculate_gain('linear') nn.init.xavier_uniform_(self.input_weights, gain) nn.init.xavier_uniform_(self.output_projection.weight, gain) def project(self, inputs, index=0, chunks=1): """ Produce a linear projection using the weights """ batch_size = inputs.shape[0] start = index * self.embed_dim end = start + chunks * self.embed_dim projections = F.linear(inputs, self.input_weights[start:end]).chunk( chunks, dim=-1) output_projections = [] for projection in projections: output_projections.append(projection.view(batch_size, -1, self. num_heads, self.projection_dim).transpose(2, 1).contiguous( ).view(batch_size * self.num_heads, -1, self.projection_dim)) return output_projections def attention(self, values, keys, queries, key_mask=None, mask=None): """ Scaled dot product attention with optional masks """ logits = self.scale * torch.bmm(queries, keys.transpose(2, 1)) if mask is not None: logits += mask if key_mask is not None: logits_shape = logits.shape batch_size = logits_shape[0] // self.num_heads logits = logits.view(batch_size, self.num_heads, logits_shape[1 ], logits_shape[2]) logits.masked_fill_(key_mask[:, None, None], float('-inf')) logits = logits.view(logits_shape) attn_weights = F.softmax(logits, dim=-1) attended = torch.bmm(attn_weights, values) batch_size = queries.shape[0] // self.num_heads return attended.view(batch_size, self.num_heads, -1, self. projection_dim).transpose(2, 1).contiguous().view(batch_size, - 1, self.num_heads * self.projection_dim) def forward(self, values, keys, queries, key_mask=None, attention_mask= None, num_queries=0): """ Forward pass of the attention """ if same_tensor(values, keys, queries): values, keys, queries = self.project(values, chunks=3) elif same_tensor(values, keys): values, keys = self.project(values, chunks=2) queries, = self.project(queries, 2) else: values, = self.project(values, 0) keys, = self.project(keys, 1) queries, = self.project(queries, 2) if num_queries: queries = queries[:, -num_queries:] attended = self.attention(values, keys, queries, key_mask, attention_mask) return self.output_projection(attended) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2) del primals_2 buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3) buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(64)](buf3, buf6, 64, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf3 buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8) return reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0 ), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 4, 16), 
(64, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0) def same_tensor(tensor, *args): """ Do the input tensors all point to the same underlying data """ for other in args: if not torch.is_tensor(other): return False if tensor.device != other.device: return False if tensor.dtype != other.dtype: return False if tensor.data_ptr() != other.data_ptr(): return False return True class MultiHeadedAttentionNew(nn.Module): """ Implement a multi-headed attention module """ def __init__(self, embed_dim, num_heads=1): """ Initialize the attention module """ super(MultiHeadedAttentionNew, self).__init__() assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}' self.embed_dim = embed_dim self.num_heads = num_heads self.projection_dim = embed_dim // num_heads self.scale = self.projection_dim ** -0.5 self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False) self.reset_parameters() def reset_parameters(self): """ Reset parameters using xavier initialization """ gain = nn.init.calculate_gain('linear') nn.init.xavier_uniform_(self.input_weights, gain) nn.init.xavier_uniform_(self.output_projection.weight, gain) def project(self, inputs, index=0, chunks=1): """ Produce a linear projection using the weights """ batch_size = inputs.shape[0] start = index * self.embed_dim end = start + chunks * self.embed_dim projections = F.linear(inputs, self.input_weights[start:end]).chunk( chunks, dim=-1) output_projections = [] for projection in projections: output_projections.append(projection.view(batch_size, -1, self. num_heads, self.projection_dim).transpose(2, 1).contiguous( ).view(batch_size * self.num_heads, -1, self.projection_dim)) return output_projections def attention(self, values, keys, queries, key_mask=None, mask=None): """ Scaled dot product attention with optional masks """ logits = self.scale * torch.bmm(queries, keys.transpose(2, 1)) if mask is not None: logits += mask if key_mask is not None: logits_shape = logits.shape batch_size = logits_shape[0] // self.num_heads logits = logits.view(batch_size, self.num_heads, logits_shape[1 ], logits_shape[2]) logits.masked_fill_(key_mask[:, None, None], float('-inf')) logits = logits.view(logits_shape) attn_weights = F.softmax(logits, dim=-1) attended = torch.bmm(attn_weights, values) batch_size = queries.shape[0] // self.num_heads return attended.view(batch_size, self.num_heads, -1, self. projection_dim).transpose(2, 1).contiguous().view(batch_size, - 1, self.num_heads * self.projection_dim) def forward(self, input_0, input_1, input_2): primals_2 = self.input_weights primals_5 = self.output_projection.weight primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jinga-lala/stupidNMT
MultiHeadedAttention
false
10280
[ "BSD-3-Clause" ]
0
2a41c072c2bc622c7edd8556f552f38556d70dae
https://github.com/jinga-lala/stupidNMT/tree/2a41c072c2bc622c7edd8556f552f38556d70dae
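One detail worth noting in the entry above: triton_per_fused__softmax_0 applies the 0.5 attention scale (projection_dim ** -0.5 with projection_dim = 4) after subtracting the row max. That is valid because, for s > 0, softmax(s*x) = exp(s*(x - max x)) / sum(exp(s*(x - max x))). A quick numeric check of the identity; illustrative sketch, not generated code.

import torch

logits = torch.randn(4, 16, 16)
scale = 0.5  # projection_dim ** -0.5 with projection_dim = 4

direct = torch.softmax(scale * logits, dim=-1)

# Fold the scale in after max-subtraction, as the fused kernel does.
shifted = (logits - logits.amax(dim=-1, keepdim=True)) * scale
e = shifted.exp()
fused = e / e.sum(dim=-1, keepdim=True)

assert torch.allclose(direct, fused, atol=1e-6)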
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # h => mul_3 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5, alpha=1, beta=1, out=buf2) del primals_4 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch from torch import nn from torch.nn.parameter import Parameter def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class Conv1D(nn.Module): def __init__(self, nf, nx): super(Conv1D, self).__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = Parameter(w) self.bias = Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class MLP(nn.Module): def __init__(self, n_state, config): super(MLP, self).__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = gelu def forward(self, x): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) return h2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_state': 4, 'config': _mock_config(n_embd=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), primals_5, alpha=1, beta=1, out=buf2) del primals_4 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor( primals_1, (4, 64), (1, 4), 0) def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class Conv1D(nn.Module): def __init__(self, nf, nx): super(Conv1D, self).__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = Parameter(w) self.bias = Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class MLPNew(nn.Module): def __init__(self, n_state, config): super(MLPNew, self).__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = gelu def forward(self, input_0): primals_3 = self.c_fc.weight primals_2 = self.c_fc.bias primals_5 = self.c_proj.weight primals_4 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jamessheenworks/GPT2sQA
MLP
false
10281
[ "Apache-2.0" ]
0
14866cb21d229281e8f8b8f88aac9195bca45cd7
https://github.com/jamessheenworks/GPT2sQA/tree/14866cb21d229281e8f8b8f88aac9195bca45cd7
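The constant 0.7978845608028654 baked into triton_poi_fused_add_mul_pow_tanh_0 above is sqrt(2/pi): the kernel inlines the tanh approximation of GELU from gelu(). A sketch relating those constants to PyTorch's built-in variant; F.gelu(..., approximate='tanh') requires torch >= 1.12.

import math
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)

# What the fused kernel computes element-wise:
k = 0.7978845608028654  # == math.sqrt(2 / math.pi)
manual = 0.5 * x * (1 + torch.tanh(k * (x + 0.044715 * x ** 3)))

assert k == math.sqrt(2 / math.pi)
assert torch.allclose(manual, F.gelu(x, approximate='tanh'), atol=1e-6)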
Classify
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool2d => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex 
x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) return (reinterpret_tensor(buf2, (4, 4), (4, 1), 0), primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [(x // 2) for x in k] return p class Flatten(nn.Module): @staticmethod def forward(x): return x.view(x.size(0), -1) class Classify(nn.Module): def __init__(self, c1, c2, k=1, s=1, p=None, g=1): super(Classify, self).__init__() self.aap = nn.AdaptiveAvgPool2d(1) self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False ) self.flat = Flatten() def forward(self, x): z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) return self.flat(self.conv(z)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c1': 4, 'c2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) return reinterpret_tensor(buf2, (4, 4), (4, 1), 0), primals_2, buf1 def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [(x // 2) for x in k] return p class Flatten(nn.Module): @staticmethod def forward(x): return x.view(x.size(0), -1) class ClassifyNew(nn.Module): def __init__(self, c1, c2, k=1, s=1, p=None, g=1): super(ClassifyNew, self).__init__() self.aap = nn.AdaptiveAvgPool2d(1) self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False ) self.flat = Flatten() def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
hyperparameters/Towards-Realtime-MOT
Classify
false
10,282
[ "MIT" ]
0
eb956a3bd5991f4895178566cb0173769977f88d
https://github.com/hyperparameters/Towards-Realtime-MOT/tree/eb956a3bd5991f4895178566cb0173769977f88d
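A quick way to sanity-check a record like the one above is to run the eager Classify module against the compiled ClassifyNew wrapper on shared weights and the same input. The sketch below is illustrative, not part of the record: it assumes a CUDA device (the generated call() pins every buffer to cuda:0), and the seed and tolerance are arbitrary choices.

import torch

torch.manual_seed(0)  # arbitrary seed, only to make this check reproducible
x = torch.rand(4, 4, 4, 4, device='cuda')

eager = Classify(c1=4, c2=4).cuda().eval()
compiled = ClassifyNew(c1=4, c2=4).cuda().eval()
# Copy the single conv weight so both modules compute the same function.
compiled.conv.weight.data.copy_(eager.conv.weight.data)

with torch.no_grad():
    ref = eager(x)     # adaptive avg-pool -> 1x1 conv -> flatten
    out = compiled(x)  # triton mean kernel -> extern convolution

assert ref.shape == out.shape == (4, 4)
assert torch.allclose(ref, out, atol=1e-5), 'compiled path diverged'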
NeuralNerwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/n7/cn7nh46rtd7kizmv3lp3mx2h5n7mmglzjvvwnon3yiv3v4iyf4ga.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4000 x2 = (xindex 
// 4000) tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x1 + (4096*x2)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ce/cce4cx4v7uh7jmcpcvfcdgwb4qfxzeg6zviju4yc77tmmf6u2dco.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu_2 # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2000 x1 = (xindex // 2000) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x0 + (2016*x1)), tmp2, xmask) tl.store(out_ptr1 + (x0 + (2048*x1)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/w6/cw662frhzlhbtv7e6y3yral7v4ea62wwb2adkoxku3gvogqatytc.py # Topologically Sorted Source Nodes: [x_2, linear_3], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # linear_3 => view_6 # x_2 => relu_2 # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %view_6 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_2, [64, 500]), kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 500 x1 = (xindex // 500) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (500*(x1 % 4)) + (2016*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uc/cucpr7hphg2dgixpqpnmigv4f4iayxbuteffgw4qgp4gk5lt3hvy.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => relu_3 # Graph fragment: # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uk/cuksa57ow4msvzqmh6rcfm45f35yxtl2zdm2melyp4zye2akg3c7.py # Topologically Sorted Source Nodes: [relu_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu_4 => relu_4 # Graph fragment: # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1000, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1000, 1000), (1000, 1)) assert_size_stride(primals_4, (500, 1000), 
(1000, 1)) assert_size_stride(primals_5, (200, 500), (500, 1)) assert_size_stride(primals_6, (4, 200), (200, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0); del buf0 # reuse buf15 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf15, 64000, grid=grid(64000), stream=stream0) buf2 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), reinterpret_tensor(primals_3, (1000, 1000), (1, 1000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0); del buf2 # reuse buf14 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, buf14, 64000, grid=grid(64000), stream=stream0) buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0), reinterpret_tensor(primals_4, (1000, 500), (1, 1000), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf4, buf5, buf13, 32000, grid=grid(32000), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_2, linear_3], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf5, buf6, 32000, grid=grid(32000), stream=stream0) del buf5 buf7 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (500, 200), (1, 500), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf7 # reuse buf12 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf8, buf12, 12800, grid=grid(12800), stream=stream0) buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf9 # reuse buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_4.run(buf10, buf11, 256, grid=grid(256), stream=stream0) 
return (buf10, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0), buf6, reinterpret_tensor(buf8, (64, 200), (200, 1), 0), buf11, primals_6, buf12, primals_5, buf13, primals_4, buf14, primals_3, buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1000, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1000, 1000), (1000, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((500, 1000), (1000, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((200, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class NeuralNerwork(nn.Module): """ Construct a ReLU-activated NN, set Bias to False Four hidden layers with sizes [1000, 1000, 500, 200] Features = 784, Targets = 10 classes """ def __init__(self, features, targets): super(NeuralNerwork, self).__init__() self.fc1 = nn.Linear(features, 1000, bias=False) self.fc2 = nn.Linear(1000, 1000, bias=False) self.fc3 = nn.Linear(1000, 500, bias=False) self.fc4 = nn.Linear(500, 200, bias=False) self.fc5 = nn.Linear(200, targets, bias=False) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = F.relu(self.fc4(x)) return F.relu(self.fc5(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4, 'targets': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4000 x2 = xindex // 4000 tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + (x1 + 4096 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2000 x1 = xindex // 2000 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x0 + 2016 * x1), tmp2, xmask) tl.store(out_ptr1 + (x0 + 2048 * x1), tmp4, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 500 x1 = xindex // 500 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 500 * (x1 % 4) + 2016 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1000, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1000, 1000), (1000, 1)) assert_size_stride(primals_4, (500, 1000), (1000, 1)) assert_size_stride(primals_5, (200, 500), (500, 1)) assert_size_stride(primals_6, (4, 200), (200, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0) del primals_1 
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0) del buf0 buf15 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64000)](buf1, buf15, 64000, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0 ), reinterpret_tensor(primals_3, (1000, 1000), (1, 1000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0) del buf2 buf14 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(64000)](buf3, buf14, 64000, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0 ), reinterpret_tensor(primals_4, (1000, 500), (1, 1000), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(32000)](buf4, buf5, buf13, 32000, XBLOCK=256, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused_relu_view_2[grid(32000)](buf5, buf6, 32000, XBLOCK =128, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (500, 200), ( 1, 500), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 200), (3200, 800, 200, 1), 0) del buf7 buf12 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(12800)](buf8, buf12, 12800, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf9 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(256)](buf10, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) return (buf10, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0), buf6, reinterpret_tensor(buf8, (64, 200), (200, 1), 0), buf11, primals_6, buf12, primals_5, buf13, primals_4, buf14, primals_3, buf15) class NeuralNerworkNew(nn.Module): """ Construct a ReLU-activated NN, set Bias to False Four hidden layers with sizes [1000, 1000, 500, 200] Features = 784, Targets = 10 classes """ def __init__(self, features, targets): super(NeuralNerworkNew, self).__init__() self.fc1 = nn.Linear(features, 1000, bias=False) self.fc2 = nn.Linear(1000, 1000, bias=False) self.fc3 = nn.Linear(1000, 500, bias=False) self.fc4 = nn.Linear(500, 200, bias=False) self.fc5 = nn.Linear(200, targets, bias=False) def forward(self, input_0): primals_1 = self.fc1.weight primals_3 = self.fc2.weight primals_4 = self.fc3.weight primals_5 = self.fc4.weight primals_6 = self.fc5.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jf20541/Pruning-DeepNeuralNetwork
NeuralNerwork
false
10,283
[ "MIT" ]
0
a78a88616c19aa0f1449eb562b7dd8d7c4f47252
https://github.com/jf20541/Pruning-DeepNeuralNetwork/tree/a78a88616c19aa0f1449eb562b7dd8d7c4f47252
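Worth noting in the record above: each bias-free nn.Linear over the 4-D activation is lowered to one extern mm over a (64, in_features) view, with the weight transposed via reinterpret_tensor instead of being materialized. The sketch below shows that equivalence in plain eager PyTorch; the shapes are taken from the record and no GPU is required.

import torch

x = torch.rand(4, 4, 4, 4)   # same shape as primals_2
w1 = torch.rand(1000, 4)     # fc1.weight layout is (out_features, in_features)

ref = torch.nn.functional.linear(x, w1)                      # (4, 4, 4, 1000)
via_mm = (x.reshape(64, 4) @ w1.t()).reshape(4, 4, 4, 1000)  # the mm the wrapper emits

assert torch.allclose(ref, via_mm)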
SELayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool2d => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex 
x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3p/c3pvtte4adxbodvqq4iab6zximg555lpk2lopxwzpt4fva4eetqt.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # y_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xv/cxvgsfj3x2o5ls6evsy4rhywutbtjkwezlavric3plphgvn75mea.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf3, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class SELayer(nn.Module):

    def __init__(self, in_channels, reduction):
        super().__init__()
        mid_channels = in_channels // reduction
        self.fc1 = nn.Linear(in_channels, mid_channels)
        self.fc2 = nn.Linear(mid_channels, in_channels)

    def forward(self, x):
        n_batches, n_channels, _, _ = x.size()
        y = F.adaptive_avg_pool2d(x, output_size=1).view(n_batches, n_channels)
        y = F.relu(self.fc1(y), inplace=True)
        # torch.sigmoid instead of the deprecated F.sigmoid alias; same behavior
        y = torch.sigmoid(self.fc2(y)).view(n_batches, n_channels, 1, 1)
        return x * y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'reduction': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), buf3, buf4, primals_4 class SELayerNew(nn.Module): def __init__(self, in_channels, reduction): super().__init__() mid_channels = in_channels // reduction self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 
= nn.Linear(mid_channels, in_channels) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
implus/pytorch_image_classification
SELayer
false
10,284
[ "MIT" ]
0
cac490ed518ad09b0429fc01af060457fb050e68
https://github.com/implus/pytorch_image_classification/tree/cac490ed518ad09b0429fc01af060457fb050e68
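In the SELayer record above, the sigmoid never materializes as its own buffer: triton_poi_fused_mul_2 folds it into the broadcasted rescale, reading one gate value per 16 spatial elements (x1 = x2 // 16). A small eager sketch of that index arithmetic, with illustrative random values:

import torch

x = torch.rand(4, 4, 4, 4)   # feature map (n, c, h, w)
logits = torch.rand(4, 4)    # fc2 output per (n, c), before the sigmoid

# Broadcast form, as SELayer computes it eagerly:
gated = x * torch.sigmoid(logits).view(4, 4, 1, 1)

# Flat-index form mirroring the kernel: element x2 uses gate x1 = x2 // 16.
flat = x.reshape(-1)
gate = torch.sigmoid(logits).reshape(-1)
idx = torch.arange(flat.numel()) // 16
assert torch.allclose(gated.reshape(-1), flat * gate[idx])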
WeightedMultilabel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ie/cieglmnnrvea7msp3cfx77swjcuddd57vx7arxkfhqya3cusvgh5.py # Topologically Sorted Source Nodes: [loss, mul, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.mean] # Source node to ATen node mapping: # loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2 # mean => mean # mul => mul_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 4.0 tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, mul, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class WeightedMultilabel(nn.Module): def __init__(self, weights: 'torch.Tensor'): super(WeightedMultilabel, self).__init__() self.cerition = nn.BCEWithLogitsLoss(reduction='none') self.weights = weights def forward(self, outputs, targets): loss = self.cerition(outputs, targets) return (loss * self.weights).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'weights': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 4.0 tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0[grid(1)]( buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class WeightedMultilabelNew(nn.Module): def __init__(self, weights: 'torch.Tensor'): super(WeightedMultilabelNew, self).__init__() self.cerition = nn.BCEWithLogitsLoss(reduction='none') self.weights = weights def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jiawenxiao/physionet2020_0823
WeightedMultilabel
false
10,285
[ "BSD-2-Clause" ]
0
99dd54a3f7b8cef83ff37a46223f4f979edd2e74
https://github.com/jiawenxiao/physionet2020_0823/tree/99dd54a3f7b8cef83ff37a46223f4f979edd2e74
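The reduction kernel in the record above inlines the numerically stable form of BCEWithLogitsLoss rather than calling it: per the graph fragment, arg1_1 plays the logits role, and the per-element loss is (1 - y) * x - (min(0, x) - log1p(exp(-|x|))), which is algebraically max(x, 0) - x*y + log1p(exp(-|x|)); the scalar weight 4 is baked in as tmp13. A quick eager check of that identity on illustrative data:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)   # logits
y = torch.rand(4, 4, 4, 4)   # targets

# tmp4 - tmp11 in the kernel, written out elementwise:
stable = (1 - y) * x - (torch.minimum(torch.tensor(0.0), x)
                        - torch.log1p(torch.exp(-x.abs())))
ref = nn.BCEWithLogitsLoss(reduction='none')(x, y)

assert torch.allclose(stable, ref, atol=1e-6)
assert torch.allclose((stable * 4).mean(), (ref * 4).mean())  # weight = 4, then mean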
BertLayerNormNoVar
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ke/ckesoupia4od4yj57n7ovmr2wav7eopyyngdjidks2sqhi3s4yx5.py # Topologically Sorted Source Nodes: [u, x, mul, add], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # u => mean # x => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_mean_mul_sub_0 = async_compile.triton('triton_poi_fused_add_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [u, x, mul, add], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BertLayerNormNoVar(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNormNoVar, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        x = x - u
        return self.weight * x + self.bias


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
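For reference, this variant normalizes by centering only: the stored variance_epsilon is never read in forward, which is why the fused kernels in this record contain no division by a standard deviation. Over the last (hidden) dimension of size $H$:

$$y = \gamma \odot (x - \mu) + \beta, \qquad \mu = \frac{1}{H}\sum_{i=1}^{H} x_i$$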
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class BertLayerNormNoVarNew(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNoVarNew, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
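A minimal sanity check for this record, assuming a CUDA device and that both classes above are in scope (the helper name check_close is illustrative, not part of the source):

import torch

def check_close():
    torch.manual_seed(0)
    eager = BertLayerNormNoVar(hidden_size=4).cuda()
    fused = BertLayerNormNoVarNew(hidden_size=4).cuda()
    fused.load_state_dict(eager.state_dict())  # share the same weight/bias
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # The Triton wrapper should reproduce the eager mean-centering
    # up to float32 rounding.
    assert torch.allclose(eager(x), fused(x), atol=1e-6)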
jiachens/auto_LiRPA
BertLayerNormNoVar
false
10286
[ "BSD-3-Clause" ]
0
cc1ff18e8fbc938953b20ae6a030a25761cb0b78
https://github.com/jiachens/auto_LiRPA/tree/cc1ff18e8fbc938953b20ae6a030a25761cb0b78
RobNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xr/cxrn5vznflyqhnitxjoh4jnyfpvkm4wgtins5mlsntlgyloblhb5.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 961) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qo/cqofrz2ucdnmie7dx4j4f3nuuetpkw5zatyeeig6ytxnqlcqtnh7.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 225) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7j/c7jlcpfayfgm4ionvsotdwcpvr26emperi3u36tmlugpw3iltf26.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 169) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qx/cqxm3qvuiggu3t4c6lcpw7d6fye5doscbl25sbc5lp4xjlpecsuv.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 61952 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 121) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zd/czdrgp7vhq73dxdrnx6ywh3qguvnwkm47rd2tugy7zi5jjorgl4f.py # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_4 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 100) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/t7/ct7pwmxlv35f2feojf7ultnhmcsjakimpf6r7me777fe4gwsocll.py # Topologically Sorted Source Nodes: [rotate], Original ATen: [aten.convolution] # Source node to ATen node mapping: # rotate => convolution_5 # Graph fragment: # %convolution_5 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 324 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cz/cczg2eenij2vrserkx72i6h5i653gads7tvesewjlx6e3vkay4po.py # Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution] # Source node to ATen node mapping: # bbox => convolution_6 # Graph fragment: # %convolution_6 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 81) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/io/ciordbtctsdog3wv7iph2zg2qi4tscpqmzasutg2jvi5pbeo22zb.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sigmoid_1, %sigmoid], 1), kwargs = {}) triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1620 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 81) % 5 x0 = xindex % 81 x2 = (xindex // 405) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (81*x1) + (324*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 5, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr1 + (x0 + (81*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (128, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (1, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_13, (1, ), (1, )) assert_size_stride(primals_14, (4, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_15, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 31, 31), (15376, 961, 31, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 61504, grid=grid(61504), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 15, 15), (7200, 225, 15, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 28800, grid=grid(28800), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 13, 13), (10816, 169, 13, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 43264, grid=grid(43264), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 11, 11), (15488, 121, 11, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf7, primals_9, 61952, grid=grid(61952), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 10, 10), (12800, 100, 10, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_11, 51200, grid=grid(51200), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [rotate], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 1, 9, 9), (81, 81, 9, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [rotate], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf11, primals_13, 324, grid=grid(324), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf9, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 9, 9), (324, 81, 9, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf13, primals_15, 1296, grid=grid(1296), stream=stream0) del primals_15 buf14 = empty_strided_cuda((4, 5, 9, 9), (405, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf13, buf11, buf14, 1620, grid=grid(1620), stream=stream0) return (buf14, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, buf11, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_10 = rand_strided((128, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class RobNet(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, dilation=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1)
        self.conv5 = nn.Conv2d(128, 128, kernel_size=2, stride=1)
        self.rotate = nn.Conv2d(128, 1, kernel_size=2, stride=1)
        self.bbox = nn.Conv2d(128, 4, kernel_size=2, stride=1)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        x = F.relu(self.conv1(x), inplace=True)
        x = F.relu(self.conv2(x), inplace=True)
        x = F.relu(self.conv3(x), inplace=True)
        x = F.relu(self.conv4(x), inplace=True)
        x = F.relu(self.conv5(x), inplace=True)
        rotate = self.rotate(x)
        rotate = self.sig(rotate)
        bbox = self.bbox(x)
        bbox = self.sig(bbox)
        out = torch.cat((bbox, rotate), dim=1)
        return out


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
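The hard-coded element counts in the kernels above (61504, 28800, 43264, 61952, 51200, 324, 1296, 1620) all follow from standard convolution output-size arithmetic applied to the 4x1x64x64 input; a minimal sketch, pure Python with an illustrative helper name:

def conv_out(size, k, s=1, p=0, d=1):
    # The usual Conv2d formula: floor((size + 2p - d*(k-1) - 1) / s) + 1
    return (size + 2 * p - d * (k - 1) - 1) // s + 1


h = 64
for k, s in [(3, 2), (3, 2), (3, 1), (3, 1), (2, 1), (2, 1)]:
    h = conv_out(h, k, s)  # 31 -> 15 -> 13 -> 11 -> 10 -> 9
assert h == 9  # the 9x9 maps produced by the rotate/bbox heads
assert 4 * 16 * 31 * 31 == 61504  # xnumel of triton_poi_fused_convolution_relu_0
assert 4 * 5 * 9 * 9 == 1620  # xnumel of triton_poi_fused_cat_7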
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 961 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 225 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 169 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 61952 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 121 % 128 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 100 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 324 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex 
x1 = xindex // 81 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1620 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 81 % 5 x0 = xindex % 81 x2 = xindex // 405 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 81 * x1 + 324 * x2), tmp4 & xmask, other=0.0 ) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp12 = tl.load(in_ptr1 + (x0 + 81 * x2), tmp9 & xmask, eviction_policy ='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (1, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (4, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 31, 31), (15376, 961, 31, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(61504)](buf1, primals_2, 61504, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 15, 15), (7200, 225, 15, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(28800)](buf3, primals_5, 28800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 13, 13), (10816, 169, 13, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(43264)](buf5, primals_7, 43264, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 11, 11), (15488, 121, 11, 1)) buf7 = buf6 del buf6 
triton_poi_fused_convolution_relu_3[grid(61952)](buf7, primals_9, 61952, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 10, 10), (12800, 100, 10, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(51200)](buf9, primals_11, 51200, XBLOCK=512, num_warps=4, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 1, 9, 9), (81, 81, 9, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_5[grid(324)](buf11, primals_13, 324, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf9, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 9, 9), (324, 81, 9, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_6[grid(1296)](buf13, primals_15, 1296, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf14 = empty_strided_cuda((4, 5, 9, 9), (405, 81, 9, 1), torch.float32 ) triton_poi_fused_cat_7[grid(1620)](buf13, buf11, buf14, 1620, XBLOCK=256, num_warps=4, num_stages=1) return (buf14, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, buf11, buf13) class RobNetNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, dilation=1) self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2) self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1) self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1) self.conv5 = nn.Conv2d(128, 128, kernel_size=2, stride=1) self.rotate = nn.Conv2d(128, 1, kernel_size=2, stride=1) self.bbox = nn.Conv2d(128, 4, kernel_size=2, stride=1) self.sig = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.conv5.weight primals_11 = self.conv5.bias primals_12 = self.rotate.weight primals_13 = self.rotate.bias primals_14 = self.bbox.weight primals_15 = self.bbox.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
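The branchy triton_poi_fused_cat_7 above fuses the two sigmoids into the channel concatenation; a minimal eager reference for what it computes, with random tensors standing in for the pre-sigmoid conv outputs buf13 and buf11:

import torch

bbox = torch.rand(4, 4, 9, 9)  # stands in for buf13 (bbox conv + bias)
rotate = torch.rand(4, 1, 9, 9)  # stands in for buf11 (rotate conv + bias)
out = torch.cat((torch.sigmoid(bbox), torch.sigmoid(rotate)), dim=1)
assert out.shape == (4, 5, 9, 9)  # channels 0-3 from bbox, channel 4 from rotate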
hongrui16/rotated_detection
RobNet
false
10287
[ "MIT" ]
0
0b0a061b0753950c20d1e52c8ae8fc59e1ceb21d
https://github.com/hongrui16/rotated_detection/tree/0b0a061b0753950c20d1e52c8ae8fc59e1ceb21d
Conv2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [2], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/l7/cl73m2z7ubizl4gjzahoztnfbxiinsybshrc4sjlnb7hovne23sz.py # Topologically Sorted Source Nodes: [sigmoid, tanh, mul], Original ATen: [aten.sigmoid, aten.tanh, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # tanh => tanh # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (8*x1)), xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + x0 + (8*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp12 = tmp5 * tmp11 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args 
args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 1), (8, 1, 1)) del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, tanh, mul], Original ATen: [aten.sigmoid, aten.tanh, aten.mul] triton_poi_fused_mul_sigmoid_tanh_1.run(buf1, primals_3, buf2, buf3, buf4, buf5, 16, grid=grid(16), stream=stream0) del buf1 del buf2 del primals_3 return (buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class Conv2(nn.Module):
    """
    A convolution layer with the stride of 2.

    Input:
        x: (N, 2L+2, in_channels) numeric tensor
        global_cond: (N, global_cond_channels) numeric tensor
    Output:
        y: (N, L, out_channels) numeric tensor
    """

    def __init__(self, in_channels, out_channels, global_cond_channels):
        super().__init__()
        ksz = 4
        self.out_channels = out_channels
        if 0 < global_cond_channels:
            self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels,
                bias=False)
        self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz,
            stride=2)
        wsize = 2.967 / math.sqrt(ksz * in_channels)
        self.conv_wide.weight.data.uniform_(-wsize, wsize)
        self.conv_wide.bias.data.zero_()

    def forward(self, x, global_cond):
        x1 = self.conv_wide(x.transpose(1, 2)).transpose(1, 2)
        if global_cond is not None:
            x2 = self.w_cond(global_cond).unsqueeze(1).expand(-1, x1.size(1), -1)
        else:
            x2 = torch.zeros_like(x1)
        a, b = (x1 + x2).split(self.out_channels, dim=2)
        return torch.sigmoid(a) * torch.tanh(b)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4,
        'global_cond_channels': 4}]
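The docstring's shape claim reduces to one line of arithmetic: a kernel of 4 with stride 2 maps a length-(2L+2) sequence to exactly L steps, which is why the record's length-4 input (L=1) produces the (4, 8, 1) buf1 below. A quick check, helper name illustrative:

def conv1d_out(n, k=4, s=2):
    return (n - k) // s + 1


for L in (1, 5, 16):
    assert conv1d_out(2 * L + 2) == L  # (2L + 2 - 4) // 2 + 1 == L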
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 8 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + x0 + 8 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.tanh(tmp10)
    tmp12 = tmp5 * tmp11
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (8, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (8,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (8, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 8, 1), (8, 1, 1))
        del buf0
        buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 8),
            (1, 4), 0), out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        triton_poi_fused_mul_sigmoid_tanh_1[grid(16)](buf1, primals_3,
            buf2, buf3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
        del buf2
        del primals_3
    return buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4,
        4), (16, 1, 4), 0), buf3, buf4


class Conv2New(nn.Module):
    """
    A convolution layer with the stride of 2.

    Input:
        x: (N, 2L+2, in_channels) numeric tensor
        global_cond: (N, global_cond_channels) numeric tensor
    Output:
        y: (N, L, out_channels) numeric tensor
    """

    def __init__(self, in_channels, out_channels, global_cond_channels):
        super().__init__()
        ksz = 4
        self.out_channels = out_channels
        if 0 < global_cond_channels:
            self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels,
                bias=False)
        self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2)
        wsize = 2.967 / math.sqrt(ksz * in_channels)
        self.conv_wide.weight.data.uniform_(-wsize, wsize)
        self.conv_wide.bias.data.zero_()

    def forward(self, input_0, input_1):
        primals_5 = self.w_cond.weight
        primals_2 = self.conv_wide.weight
        primals_3 = self.conv_wide.bias
        primals_1 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
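Conv2New routes the same parameters through the compiled call() wrapper, so on a machine with a CUDA device the two modules should agree numerically. A minimal parity sketch (tolerances are illustrative, not from the source):

import torch

# Copy the eager module's weights into the compiled module and compare
# outputs on identical inputs. Assumes a CUDA device is available.
torch.manual_seed(0)
eager = Conv2(4, 4, 4).cuda()
compiled = Conv2New(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
cond = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(eager(x, cond), compiled(x, cond),
    rtol=1e-4, atol=1e-4)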
jonojace/WaveRNN
Conv2
false
10288
[ "MIT" ]
0
5ac72d5ed10262132f016f8e523bc663faa991da
https://github.com/jonojace/WaveRNN/tree/5ac72d5ed10262132f016f8e523bc663faa991da
CatKLLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/mg/cmg3rjq7o6dnngxd2py7kesfu2nr7fflmrgx6htiquznvsanm322.py
# Topologically Sorted Source Nodes: [qy, sub, mul, kl], Original ATen: [aten.exp, aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
#   kl => sum_1
#   mul => mul
#   qy => exp
#   sub => sub
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %sub), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_exp_mul_sub_sum_0 = async_compile.triton('triton_poi_fused_exp_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tmp2 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
    tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
    tmp7 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
    tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
    tmp13 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
    tmp17 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
    tmp19 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tmp0 - tmp2
    tmp4 = tmp1 * tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp5 - tmp7
    tmp9 = tmp6 * tmp8
    tmp10 = tmp4 + tmp9
    tmp12 = tl_math.exp(tmp11)
    tmp14 = tmp11 - tmp13
    tmp15 = tmp12 * tmp14
    tmp16 = tmp10 + tmp15
    tmp18 = tl_math.exp(tmp17)
    tmp20 = tmp17 - tmp19
    tmp21 = tmp18 * tmp20
    tmp22 = tmp16 + tmp21
    tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [qy, sub, mul, kl], Original ATen: [aten.exp, aten.sub, aten.mul, aten.sum]
        stream0 = get_raw_stream(0)
        triton_poi_fused_exp_mul_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
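The kernel unrolls the dim-1 sum (size 4) into four load/accumulate steps per output element. A plain-PyTorch reference for the same computation, under the shapes asserted in call():

import torch

# Reference for the fused kernel: elementwise exp(q) * (q - p), then a sum
# over dim=1, mapping two (4, 4, 4, 4) inputs to a (4, 4, 4) output like buf0.
log_qy = torch.rand(4, 4, 4, 4)
log_py = torch.rand(4, 4, 4, 4)
kl = (log_qy.exp() * (log_qy - log_py)).sum(dim=1)
assert kl.shape == (4, 4, 4)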
import torch
from torch.nn.modules.loss import _Loss


class CatKLLoss(_Loss):

    def __init__(self, reduction='none'):
        super(CatKLLoss, self).__init__()
        assert reduction in ['none', 'sum', 'mean']
        self.reduction = reduction

    def forward(self, log_qy, log_py):
        """
        KL(qy|py) = Eq[qy * log(q(y) / p(y))]

        log_qy: (batch_size, latent_size)
        log_py: (batch_size, latent_size)
        """
        qy = torch.exp(log_qy)
        kl = torch.sum(qy * (log_qy - log_py), dim=1)
        if self.reduction == 'mean':
            kl = kl.mean()
        elif self.reduction == 'sum':
            kl = kl.sum()
        return kl


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
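For reduction='none' this is the standard categorical KL(q||p) from log-probabilities, and it should match torch.nn.functional.kl_div with log_target=True, whose argument order is (input=log_p, target=log_q). A minimal sketch using the class above (the softmax inputs are illustrative):

import torch
import torch.nn.functional as F

# F.kl_div with log_target=True computes target.exp() * (target - input)
# pointwise, so passing (log_py, log_qy) reproduces qy * (log_qy - log_py).
log_qy = torch.log_softmax(torch.rand(4, 8), dim=1)
log_py = torch.log_softmax(torch.rand(4, 8), dim=1)
kl_a = CatKLLoss(reduction='none')(log_qy, log_py)
kl_b = F.kl_div(log_py, log_qy, reduction='none', log_target=True).sum(dim=1)
torch.testing.assert_close(kl_a, kl_b)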
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp2 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
    tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
    tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tmp0 - tmp2
    tmp4 = tmp1 * tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp5 - tmp7
    tmp9 = tmp6 * tmp8
    tmp10 = tmp4 + tmp9
    tmp12 = tl_math.exp(tmp11)
    tmp14 = tmp11 - tmp13
    tmp15 = tmp12 * tmp14
    tmp16 = tmp10 + tmp15
    tmp18 = tl_math.exp(tmp17)
    tmp20 = tmp17 - tmp19
    tmp21 = tmp18 * tmp20
    tmp22 = tmp16 + tmp21
    tl.store(out_ptr0 + x2, tmp22, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class CatKLLossNew(_Loss):

    def __init__(self, reduction='none'):
        super(CatKLLossNew, self).__init__()
        assert reduction in ['none', 'sum', 'mean']
        self.reduction = reduction

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
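A minimal parity sketch between the eager and compiled modules; this assumes a CUDA device, since call() asserts contiguous (4, 4, 4, 4) CUDA inputs, and uses reduction='none', the only reduction the fused kernel implements:

import torch

# The compiled CatKLLossNew should agree with the eager CatKLLoss.
log_qy = torch.rand(4, 4, 4, 4, device='cuda')
log_py = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(
    CatKLLoss(reduction='none')(log_qy, log_py),
    CatKLLossNew(reduction='none')(log_qy, log_py),
)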
imguozhen/proactive-chat
CatKLLoss
false
10289
[ "Apache-2.0" ]
0
80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
PCN1
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/o4/co4nsnmwmq6u72ocszlwnicby3irkdzg333bffvczctebolija3z.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 48
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = (yindex // 3)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 12
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = (yindex // 3)
    tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/2l/c2lopujvmnumdt346ycuertt5fmhzvjrvguon2iyn4d4fxs2achu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 512
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 16
    y1 = (yindex // 16)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (16*x2) + (144*y1)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/wv/cwvgka2tdnkfhotjblshnd7peeqx5dbyqvmgelrsa445t7sdxarg.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 2048
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = (yindex // 32)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/3q/c3qvez2r77ch5iao37cjwlxkfudhougiy5mgqj2rvlssb3674oac.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192, 4], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 8192
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = (yindex // 64)
    tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (64*x2) + (256*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/q4/cq4jzxm6mfzoz3s2gw4gzdejerpbfgskqaexzzt3f2ymblqxix3g.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d => convolution
#   x => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 61504
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/ku/ckubedjc6xofli363opw2wv3s5l2og734gsgxuoguw3xp6n4zglw.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d_1 => convolution_1
#   x_1 => relu_1
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 28800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/oh/cohrpvvogjhpj5hqyf7yuwjs7gi6atbdwjl2s3qcqn4pkpe2znbw.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
#   x_2 => relu_2
# Graph fragment:
#   %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 12544
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/cl/ccl4udr4jxpg6mwurvh2p65ejlfsvbktwf7lbnwswommioowsrm2.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d_3 => convolution_3
#   x_3 => relu_3
# Graph fragment:
#   %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_3 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 18432
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/5q/c5q5kumendubiv2gwewlgrsy5mqbp2zgrqunx4htiexaday3bklo.py
# Topologically Sorted Source Nodes: [conv2d_4, cls_prob], Original ATen: [aten.convolution, aten._softmax]
# Source node to ATen node mapping:
#   cls_prob => amax, exp, sub
#   conv2d_4 => convolution_4
# Graph fragment:
#   %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_4, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_4, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_convolution_9 = async_compile.triton('triton_poi_fused__softmax_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_convolution_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_convolution_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    x1 = (xindex // 2)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (0))
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1))
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp6 = tmp3 + tmp5
    tmp10 = tmp7 + tmp9
    tmp11 = triton_helpers.maximum(tmp6, tmp10)
    tmp12 = tmp2 - tmp11
    tmp13 = tl_math.exp(tmp12)
    tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/ag/cagkmfz6apmfdbpgeexfxmzfgz5cdokn35ktphlwqielpc5xh3rf.py
# Topologically Sorted Source Nodes: [cls_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   cls_prob => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_10 = async_compile.triton('triton_poi_fused__softmax_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8, 64], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 8
    xnumel = 36
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 2
    y1 = (yindex // 2)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (72*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + ((2*x2) + (72*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (2*x2) + (72*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 / tmp3
    tl.store(out_ptr0 + (x2 + (36*y3)), tmp4, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/7n/c7nae5ymzhakbyjocdyhdjjitxv7vnelq2hhwlqzubk4q6u3iu7c.py
# Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   bbox => convolution_6
# Graph fragment:
#   %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 64], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 12
    xnumel = 36
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 3
    y1 = (yindex // 3)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (108*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + (36*y3)), tmp2, xmask & ymask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
    args.clear()
    assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (16, ), (1, ))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_5, (32, ), (1, ))
    assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (64, ), (1, ))
    assert_size_stride(primals_8, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_9, (128, ), (1, ))
    assert_size_stride(primals_10, (2, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_11, (2, ), (1, ))
    assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_13, (2, ), (1, ))
    assert_size_stride(primals_14, (3, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_15, (3, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        stream0 = get_raw_stream(0)
        triton_poi_fused_0.run(primals_1, buf0, 48, 9, grid=grid(48, 9), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_2.run(primals_4, buf2, 512, 9, grid=grid(512, 9), stream=stream0)
        del primals_4
        buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_3.run(primals_6, buf3, 2048, 9, grid=grid(2048, 9), stream=stream0)
        del primals_6
        buf4 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_4.run(primals_8, buf4, 8192, 4, grid=grid(8192, 4), stream=stream0)
        del primals_8
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 16, 31, 31), (15376, 1, 496, 16))
        buf6 = buf5; del buf5  # reuse
        # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 61504, grid=grid(61504), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 32, 15, 15), (7200, 1, 480, 32))
        buf8 = buf7; del buf7  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf8, primals_5, 28800, grid=grid(28800), stream=stream0)
        del primals_5
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 7, 7), (3136, 1, 448, 64))
        buf10 = buf9; del buf9  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_7.run(buf10, primals_7, 12544, grid=grid(12544), stream=stream0)
        del primals_7
        # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
        buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
        buf12 = buf11; del buf11  # reuse
        # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_8.run(buf12, primals_9, 18432, grid=grid(18432), stream=stream0)
        del primals_9
        # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
        buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 2, 6, 6), (72, 1, 12, 2))
        buf14 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d_4, cls_prob], Original ATen: [aten.convolution, aten._softmax]
        triton_poi_fused__softmax_convolution_9.run(buf13, primals_11, buf14, 288, grid=grid(288), stream=stream0)
        del primals_11
        buf15 = reinterpret_tensor(buf13, (4, 2, 6, 6), (72, 36, 6, 1), 0); del buf13  # reuse
        # Topologically Sorted Source Nodes: [cls_prob], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_10.run(buf14, buf15, 8, 36, grid=grid(8, 36), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
        buf16 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 2, 6, 6), (72, 1, 12, 2))
        buf17 = buf14; del buf14  # reuse
        # Topologically Sorted Source Nodes: [conv2d_5, rotate], Original ATen: [aten.convolution, aten._softmax]
        triton_poi_fused__softmax_convolution_9.run(buf16, primals_13, buf17, 288, grid=grid(288), stream=stream0)
        del primals_13
        buf18 = reinterpret_tensor(buf16, (4, 2, 6, 6), (72, 36, 6, 1), 0); del buf16  # reuse
        # Topologically Sorted Source Nodes: [rotate], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_10.run(buf17, buf18, 8, 36, grid=grid(8, 36), stream=stream0)
        del buf17
        # Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution]
        buf19 = extern_kernels.convolution(buf12, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf19, (4, 3, 6, 6), (108, 1, 18, 3))
        buf20 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32)
        # Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_11.run(buf19, primals_15, buf20, 12, 36, grid=grid(12, 36), stream=stream0)
        del buf19
        del primals_15
    return (buf15, buf18, buf20, buf0, buf1, buf2, buf3, buf4, primals_10, primals_12, primals_14, buf6, buf8, buf10, buf12, buf15, buf18, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((3, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
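The five unnamed kernels at the top are pure layout transforms: Inductor repacks the NCHW weights and the input into channels-last strides before handing them to extern_kernels.convolution. A sketch of the first one's effect, as I read the indexing (the equivalence below is my interpretation, not stated in the source):

import torch

# triton_poi_fused_0 copies the (16, 3, 3, 3) conv1 weight from contiguous
# NCHW strides (27, 9, 3, 1) into the (27, 1, 9, 3) layout that call()
# allocates for buf0, i.e. torch's channels_last memory format.
w = torch.rand(16, 3, 3, 3)
w_cl = w.to(memory_format=torch.channels_last)
assert w_cl.stride() == (27, 1, 9, 3)
torch.testing.assert_close(w_cl, w)  # same values, different physical layout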
import torch
import torch.nn as nn
import torch.nn.functional as F


class PCN1(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
        self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1)
        self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1)
        self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1)

    def forward(self, x):
        x = F.relu(self.conv1(x), inplace=True)
        x = F.relu(self.conv2(x), inplace=True)
        x = F.relu(self.conv3(x), inplace=True)
        x = F.relu(self.conv4(x), inplace=True)
        cls_prob = F.softmax(self.cls_prob(x), dim=1)
        rotate = F.softmax(self.rotate(x), dim=1)
        bbox = self.bbox(x)
        return cls_prob, rotate, bbox


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
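With a 64x64 input, the stride-2, 2, 2 stack followed by the stride-1 conv yields the 6x6 maps seen in the compiled buffers (64 -> 31 -> 15 -> 7 -> 6). A minimal shape-and-normalization sketch using the class above:

import torch

# The two softmax heads emit per-pixel probabilities over 2 channels, so they
# sum to 1 along dim=1; the bbox head is a plain linear projection.
m = PCN1()
cls_prob, rotate, bbox = m(torch.rand(4, 3, 64, 64))
assert cls_prob.shape == (4, 2, 6, 6)
assert rotate.shape == (4, 2, 6, 6)
assert bbox.shape == (4, 3, 6, 6)
torch.testing.assert_close(cls_prob.sum(dim=1), torch.ones(4, 6, 6))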
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 48
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 512
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 16
    y1 = yindex // 16
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 61504
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 28800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 12544
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused__softmax_convolution_9(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp6 = tmp3 + tmp5
    tmp10 = tmp7 + tmp9
    tmp11 = triton_helpers.maximum(tmp6, tmp10)
    tmp12 = tmp2 - tmp11
    tmp13 = tl_math.exp(tmp12)
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused__softmax_10(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 8
    xnumel = 36
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 2
    y1 = yindex // 2
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 72 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (2 * x2 + 72 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 2 * x2 + 72 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 / tmp3
    tl.store(out_ptr0 + (x2 + 36 * y3), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    xnumel = 36
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 3
    y1 = yindex // 3
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 108 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 36 * y3), tmp2, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15) = args
    args.clear()
    assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (2, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_11, (2,), (1,))
    assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_13, (2,), (1,))
    assert_size_stride(primals_14, (3, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_15, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(48, 9)](primals_1, buf0, 48, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16),
            torch.float32)
        triton_poi_fused_2[grid(512, 9)](primals_4, buf2, 512, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32),
            torch.float32)
        triton_poi_fused_3[grid(2048, 9)](primals_6, buf3, 2048, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64),
            torch.float32)
        triton_poi_fused_4[grid(8192, 4)](primals_8, buf4, 8192, 4,
            XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
        del primals_8
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 16, 31, 31), (15376, 1, 496, 16))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_5[grid(61504)](buf6, primals_2,
            61504, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 32, 15, 15), (7200, 1, 480, 32))
        buf8 = buf7
        del buf7
        triton_poi_fused_convolution_relu_6[grid(28800)](buf8, primals_5,
            28800, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 7, 7), (3136, 1, 448, 64))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_7[grid(12544)](buf10, primals_7,
            12544, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
        buf12 = buf11
        del buf11
        triton_poi_fused_convolution_relu_8[grid(18432)](buf12, primals_9,
            18432, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 2, 6, 6), (72, 1, 12, 2))
        buf14 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
        triton_poi_fused__softmax_convolution_9[grid(288)](buf13,
            primals_11, buf14, 288, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
        buf15 = reinterpret_tensor(buf13, (4, 2, 6, 6), (72, 36, 6, 1), 0)
        del buf13
        triton_poi_fused__softmax_10[grid(8, 36)](buf14, buf15, 8, 36,
            XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
        buf16 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 2, 6, 6), (72, 1, 12, 2))
        buf17 = buf14
        del buf14
        triton_poi_fused__softmax_convolution_9[grid(288)](buf16,
            primals_13, buf17, 288, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_13
        buf18 = reinterpret_tensor(buf16, (4, 2, 6, 6), (72, 36, 6, 1), 0)
        del buf16
        triton_poi_fused__softmax_10[grid(8, 36)](buf17, buf18, 8, 36,
            XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
        del buf17
        buf19 = extern_kernels.convolution(buf12, primals_14, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf19, (4, 3, 6, 6), (108, 1, 18, 3))
        buf20 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32)
        triton_poi_fused_convolution_11[grid(12, 36)](buf19, primals_15,
            buf20, 12, 36, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del buf19
        del primals_15
    return (buf15, buf18, buf20, buf0, buf1, buf2, buf3, buf4, primals_10,
        primals_12, primals_14, buf6, buf8,
buf10, buf12, buf15, buf18) class PCN1New(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1) self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2) self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2) self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1) self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1) self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1) self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.rotate.weight primals_11 = self.rotate.bias primals_12 = self.cls_prob.weight primals_13 = self.cls_prob.bias primals_14 = self.bbox.weight primals_15 = self.bbox.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0], output[1], output[2]
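For orientation, here is a minimal eager-mode sketch of the network the fused kernels above implement. The class name PCN1Sketch and the softmax placement are assumptions read off the kernels (the two 2-channel heads are softmax-normalized over the channel dimension, the 3-channel bbox head stays linear); this is not the repository's own forward.

import torch
import torch.nn as nn
import torch.nn.functional as F

class PCN1Sketch(nn.Module):
    # Hypothetical eager equivalent of PCN1New, inferred from the kernels above.
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
        self.rotate = nn.Conv2d(128, 2, kernel_size=1)
        self.cls_prob = nn.Conv2d(128, 2, kernel_size=1)
        self.bbox = nn.Conv2d(128, 3, kernel_size=1)

    def forward(self, x):
        x = F.relu(self.conv1(x))   # 64x64 -> 31x31
        x = F.relu(self.conv2(x))   # 31x31 -> 15x15
        x = F.relu(self.conv3(x))   # 15x15 -> 7x7
        x = F.relu(self.conv4(x))   # 7x7  -> 6x6
        # Kernels 9/10 above implement a channel-wise softmax over the
        # two 2-channel heads; kernel 11 just adds the bbox bias.
        rotate = F.softmax(self.rotate(x), dim=1)
        cls = F.softmax(self.cls_prob(x), dim=1)
        return rotate, cls, self.bbox(x)

x = torch.rand(4, 3, 64, 64)
rotate, cls, bbox = PCN1Sketch()(x)
print(rotate.shape, cls.shape, bbox.shape)  # (4,2,6,6) (4,2,6,6) (4,3,6,6)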
jisheng047/blinsert
PCN1
false
10,290
[ "BSD-2-Clause" ]
0
923d2ea2af3f2f257c817fa8de02c7db8ec9bcc9
https://github.com/jisheng047/blinsert/tree/923d2ea2af3f2f257c817fa8de02c7db8ec9bcc9
MaskBCELoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/73/c73o3odiipn2rhj6wxeyqlraepquu766hx3b5wyoe4fllotyuuh4.py
# Topologically Sorted Source Nodes: [bce, bce_1, bce_2], Original ATen: [aten.binary_cross_entropy, aten.sum, aten.mean]
# Source node to ATen node mapping:
#   bce => full_default, full_default_1, log, log1p, maximum, maximum_1, mul, mul_1, neg, sub, sub_1
#   bce_1 => sum_1
#   bce_2 => mean
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %maximum_1), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [1]), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_binary_cross_entropy_mean_sum_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_mean_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mean_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_mean_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = (rindex // 16)
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
    tmp3 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
    tmp13 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
    tmp15 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
    tmp25 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
    tmp27 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
    tmp37 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
    tmp39 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp14 = tmp13 - tmp1
    tmp16 = -tmp15
    tmp17 = libdevice.log1p(tmp16)
    tmp18 = triton_helpers.maximum(tmp17, tmp6)
    tmp19 = tmp14 * tmp18
    tmp20 = tl_math.log(tmp15)
    tmp21 = triton_helpers.maximum(tmp20, tmp6)
    tmp22 = tmp13 * tmp21
    tmp23 = tmp19 - tmp22
    tmp24 = tmp12 + tmp23
    tmp26 = tmp25 - tmp1
    tmp28 = -tmp27
    tmp29 = libdevice.log1p(tmp28)
    tmp30 = triton_helpers.maximum(tmp29, tmp6)
    tmp31 = tmp26 * tmp30
    tmp32 = tl_math.log(tmp27)
    tmp33 = triton_helpers.maximum(tmp32, tmp6)
    tmp34 = tmp25 * tmp33
    tmp35 = tmp31 - tmp34
    tmp36 = tmp24 + tmp35
    tmp38 = tmp37 - tmp1
    tmp40 = -tmp39
    tmp41 = libdevice.log1p(tmp40)
    tmp42 = triton_helpers.maximum(tmp41, tmp6)
    tmp43 = tmp38 * tmp42
    tmp44 = tl_math.log(tmp39)
    tmp45 = triton_helpers.maximum(tmp44, tmp6)
    tmp46 = tmp37 * tmp45
    tmp47 = tmp43 - tmp46
    tmp48 = tmp36 + tmp47
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp51 = tl.sum(tmp49, 1)[:, None]
    tmp52 = 64.0
    tmp53 = tmp51 / tmp52
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp53, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [bce, bce_1, bce_2], Original ATen: [aten.binary_cross_entropy, aten.sum, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_mean_sum_0.run(buf2, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss


class MaskBCELoss(_Loss):

    def __init__(self, reduction='mean'):
        super(MaskBCELoss, self).__init__()
        assert reduction in ['none', 'sum', 'mean']
        self.reduction = reduction

    def forward(self, input, target, mask=None):
        """
        input: (batch_size, max_len)
        target: (batch_size, max_len)
        mask: (batch_size, max_len)
        """
        bce = F.binary_cross_entropy(input=input, target=target,
            reduction='none')
        if mask is not None:
            bce *= mask.float()
        bce = bce.sum(dim=1)
        if self.reduction == 'mean':
            bce = bce.mean()
        elif self.reduction == 'sum':
            bce = bce.sum()
        return bce


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
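A short usage sketch (illustrative only, not part of this record): with an all-ones mask the loss reduces to unreduced element-wise BCE summed over dim 1 and then averaged, which the check below verifies against torch.nn.functional.

import torch
import torch.nn.functional as F

# Clamp predictions away from 0 and 1 so log() stays finite.
pred = torch.rand(4, 4, 4, 4).clamp(1e-4, 1 - 1e-4)
target = torch.rand(4, 4, 4, 4)
mask = torch.ones_like(pred)

loss = MaskBCELoss(reduction='mean')(pred, target, mask)
ref = F.binary_cross_entropy(pred, target, reduction='none').sum(dim=1).mean()
assert torch.allclose(loss, ref)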
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_mean_sum_0(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp3 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp13 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp15 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp25 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp27 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp37 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp39 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp14 = tmp13 - tmp1
    tmp16 = -tmp15
    tmp17 = libdevice.log1p(tmp16)
    tmp18 = triton_helpers.maximum(tmp17, tmp6)
    tmp19 = tmp14 * tmp18
    tmp20 = tl_math.log(tmp15)
    tmp21 = triton_helpers.maximum(tmp20, tmp6)
    tmp22 = tmp13 * tmp21
    tmp23 = tmp19 - tmp22
    tmp24 = tmp12 + tmp23
    tmp26 = tmp25 - tmp1
    tmp28 = -tmp27
    tmp29 = libdevice.log1p(tmp28)
    tmp30 = triton_helpers.maximum(tmp29, tmp6)
    tmp31 = tmp26 * tmp30
    tmp32 = tl_math.log(tmp27)
    tmp33 = triton_helpers.maximum(tmp32, tmp6)
    tmp34 = tmp25 * tmp33
    tmp35 = tmp31 - tmp34
    tmp36 = tmp24 + tmp35
    tmp38 = tmp37 - tmp1
    tmp40 = -tmp39
    tmp41 = libdevice.log1p(tmp40)
    tmp42 = triton_helpers.maximum(tmp41, tmp6)
    tmp43 = tmp38 * tmp42
    tmp44 = tl_math.log(tmp39)
    tmp45 = triton_helpers.maximum(tmp44, tmp6)
    tmp46 = tmp37 * tmp45
    tmp47 = tmp43 - tmp46
    tmp48 = tmp36 + tmp47
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp51 = tl.sum(tmp49, 1)[:, None]
    tmp52 = 64.0
    tmp53 = tmp51 / tmp52
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_mean_sum_0[grid(1)](buf2,
            arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class MaskBCELossNew(_Loss):

    def __init__(self, reduction='mean'):
        super(MaskBCELossNew, self).__init__()
        assert reduction in ['none', 'sum', 'mean']
        self.reduction = reduction

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
imguozhen/proactive-chat
MaskBCELoss
false
10,291
[ "Apache-2.0" ]
0
80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
NormalKLLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/tl/ctldn3aotcgk3fdxe6x4kczxezzfibwich7nkw2sgl3f4g2j7xkt.py
# Topologically Sorted Source Nodes: [exp, truediv, var_ratio, t1, add, sub_1, log, sub_2, mul, kl, kl_1], Original ATen: [aten.exp, aten.div, aten.pow, aten.add, aten.sub, aten.log, aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
#   add => add
#   exp => exp
#   kl => sum_1
#   kl_1 => mean
#   log => log
#   mul => mul
#   sub_1 => sub_1
#   sub_2 => sub_2
#   t1 => pow_2
#   truediv => sqrt
#   var_ratio => pow_1
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%exp,), kwargs = {})
#   %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt, 2), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, 1), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%pow_1,), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = (rindex // 16)
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
    tmp4 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
    tmp13 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
    tmp17 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
    tmp25 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
    tmp29 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
    tmp37 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
    tmp41 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 - tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = tmp8 - tmp9
    tmp11 = 0.5
    tmp12 = tmp10 * tmp11
    tmp14 = tl_math.exp(tmp13)
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = tmp15 * tmp15
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 - tmp7
    tmp21 = tl_math.log(tmp16)
    tmp22 = tmp20 - tmp21
    tmp23 = tmp22 * tmp11
    tmp24 = tmp12 + tmp23
    tmp26 = tl_math.exp(tmp25)
    tmp27 = libdevice.sqrt(tmp26)
    tmp28 = tmp27 * tmp27
    tmp30 = tmp29 * tmp29
    tmp31 = tmp28 + tmp30
    tmp32 = tmp31 - tmp7
    tmp33 = tl_math.log(tmp28)
    tmp34 = tmp32 - tmp33
    tmp35 = tmp34 * tmp11
    tmp36 = tmp24 + tmp35
    tmp38 = tl_math.exp(tmp37)
    tmp39 = libdevice.sqrt(tmp38)
    tmp40 = tmp39 * tmp39
    tmp42 = tmp41 * tmp41
    tmp43 = tmp40 + tmp42
    tmp44 = tmp43 - tmp7
    tmp45 = tl_math.log(tmp40)
    tmp46 = tmp44 - tmp45
    tmp47 = tmp46 * tmp11
    tmp48 = tmp36 + tmp47
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp51 = tl.sum(tmp49, 1)[:, None]
    tmp52 = 64.0
    tmp53 = tmp51 / tmp52
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp53, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [exp, truediv, var_ratio, t1, add, sub_1, log, sub_2, mul, kl, kl_1], Original ATen: [aten.exp, aten.div, aten.pow, aten.add, aten.sub, aten.log, aten.mul, aten.sum, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0.run(buf2, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import distributions
from torch.nn.modules.loss import _Loss


class NormalKLLoss(_Loss):

    def __init__(self, reduction='mean'):
        super(NormalKLLoss, self).__init__()
        assert reduction in ['none', 'sum', 'mean']
        self.reduction = reduction

    def forward(self, q_mu, q_logvar, p_mu=None, p_logvar=None):
        """
        q_mu: (batch_size, latent_size)
        q_logvar: (batch_size, latent_size)
        """
        if p_mu is None:
            p_mu = torch.zeros_like(q_mu)
        if p_logvar is None:
            p_logvar = torch.zeros_like(q_logvar)
        q_norm = distributions.Normal(q_mu, q_logvar.exp().sqrt())
        p_norm = distributions.Normal(p_mu, p_logvar.exp().sqrt())
        kl = distributions.kl_divergence(q_norm, p_norm).sum(dim=1)
        if self.reduction == 'mean':
            kl = kl.mean()
        elif self.reduction == 'sum':
            kl = kl.sum()
        return kl


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
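A quick sanity sketch (illustrative only, not part of this record): with the default standard-normal prior, the KL term the fused kernel computes has the closed form 0.5 * (exp(logvar) + mu^2 - 1 - logvar) per element, summed over the latent dimension and averaged over the batch.

import torch

q_mu = torch.rand(4, 8)       # (batch_size, latent_size)
q_logvar = torch.rand(4, 8)

loss = NormalKLLoss(reduction='mean')(q_mu, q_logvar)
# Closed-form KL(N(mu, exp(logvar)) || N(0, 1)) per element.
ref = (0.5 * (q_logvar.exp() + q_mu ** 2 - 1.0 - q_logvar)).sum(dim=1).mean()
assert torch.allclose(loss, ref, atol=1e-6)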
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0(in_out_ptr0,
        in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp13 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp17 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp25 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp29 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp37 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp41 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 - tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = tmp8 - tmp9
    tmp11 = 0.5
    tmp12 = tmp10 * tmp11
    tmp14 = tl_math.exp(tmp13)
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = tmp15 * tmp15
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 - tmp7
    tmp21 = tl_math.log(tmp16)
    tmp22 = tmp20 - tmp21
    tmp23 = tmp22 * tmp11
    tmp24 = tmp12 + tmp23
    tmp26 = tl_math.exp(tmp25)
    tmp27 = libdevice.sqrt(tmp26)
    tmp28 = tmp27 * tmp27
    tmp30 = tmp29 * tmp29
    tmp31 = tmp28 + tmp30
    tmp32 = tmp31 - tmp7
    tmp33 = tl_math.log(tmp28)
    tmp34 = tmp32 - tmp33
    tmp35 = tmp34 * tmp11
    tmp36 = tmp24 + tmp35
    tmp38 = tl_math.exp(tmp37)
    tmp39 = libdevice.sqrt(tmp38)
    tmp40 = tmp39 * tmp39
    tmp42 = tmp41 * tmp41
    tmp43 = tmp40 + tmp42
    tmp44 = tmp43 - tmp7
    tmp45 = tl_math.log(tmp40)
    tmp46 = tmp44 - tmp45
    tmp47 = tmp46 * tmp11
    tmp48 = tmp36 + tmp47
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp51 = tl.sum(tmp49, 1)[:, None]
    tmp52 = 64.0
    tmp53 = tmp51 / tmp52
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0[grid(1)](buf2,
            arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class NormalKLLossNew(_Loss):

    def __init__(self, reduction='mean'):
        super(NormalKLLossNew, self).__init__()
        assert reduction in ['none', 'sum', 'mean']
        self.reduction = reduction

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
imguozhen/proactive-chat
NormalKLLoss
false
10,292
[ "Apache-2.0" ]
0
80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
Gather
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ww/cwwcm5rmiipzxtxuy2xmzdfvuu7aaecv32nuqtlblhmo5atmjrki.py
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
# Source node to ATen node mapping:
#   getitem => index
# Graph fragment:
#   %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%arg1_1]), kwargs = {})
triton_poi_fused_index_0 = async_compile.triton('triton_poi_fused_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 64)
    x0 = xindex % 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
    tmp6 = tl.load(in_ptr1 + (x0 + (64*tmp4)), xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
        stream0 = get_raw_stream(0)
        triton_poi_fused_index_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.onnx


class Gather(nn.Module):

    def __init__(self, dim=0):
        self.dim = dim
        self.selection = [slice(None) for _ in range(dim)]
        super().__init__()

    def forward(self, input: 'torch.Tensor', indices: 'torch.Tensor'):
        selection = self.selection + [indices]
        return input.__getitem__(selection)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {}]
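A short usage sketch (illustrative only, not part of this record): with dim=0 the selection list is just [indices], so the module behaves like torch.index_select along dimension 0, which is what the fused index kernel implements.

import torch

x = torch.rand(4, 4, 4, 4)
idx = torch.tensor([3, 1, 1, 0])

out = Gather(dim=0)(x, idx)
# Equivalent advanced-indexing / index_select behavior along dim 0.
assert torch.equal(out, torch.index_select(x, 0, idx))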
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 64
    x0 = xindex % 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + (x0 + 64 * tmp4), xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_index_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class GatherNew(nn.Module):

    def __init__(self, dim=0):
        self.dim = dim
        self.selection = [slice(None) for _ in range(dim)]
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
jiuntian/onnx2pytorch
Gather
false
10,293
[ "Apache-2.0" ]
0
fadca10a6045f4373293c9c0854607fb51a47c12
https://github.com/jiuntian/onnx2pytorch/tree/fadca10a6045f4373293c9c0854607fb51a47c12
GlobalAveragePool
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
#   mean => mean
# Graph fragment:
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2, 3], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_mean_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.onnx


class GlobalAveragePool(nn.Module):

    def forward(self, input: 'torch.Tensor'):
        spatial_shape = input.ndimension() - 2
        dim = tuple(range(spatial_shape, spatial_shape + 2))
        return torch.mean(input, dim=dim, keepdim=True)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
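A short usage sketch (illustrative only, not part of this record): for 4-D NCHW input the module averages over dims (2, 3) with keepdim=True, which matches nn.AdaptiveAvgPool2d(1).

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)
out = GlobalAveragePool()(x)
assert out.shape == (4, 4, 1, 1)
# Same result as adaptive average pooling to a 1x1 spatial output.
assert torch.allclose(out, nn.AdaptiveAvgPool2d(1)(x))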
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class GlobalAveragePoolNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
jiuntian/onnx2pytorch
GlobalAveragePool
false
10,294
[ "Apache-2.0" ]
0
fadca10a6045f4373293c9c0854607fb51a47c12
https://github.com/jiuntian/onnx2pytorch/tree/fadca10a6045f4373293c9c0854607fb51a47c12