Dataset columns, with value ranges as reported by the dataset viewer:

| field                | type            | min   | max   |
|----------------------|-----------------|-------|-------|
| entry_point          | string (length) | 1     | 65    |
| original_triton_code | string (length) | 4.5k  | 619k  |
| python_code          | string (length) | 208   | 60.9k |
| triton_code          | string (length) | 1.15k | 275k  |
| repo_name            | string (length) | 7     | 115   |
| module_name          | string (length) | 1     | 65    |
| synthetic            | bool (1 class)  |       |       |
| uuid                 | int64           | 0     | 18.5k |
| licenses             | list (length)   | 1     | 6     |
| stars                | int64           | 0     | 19.8k |
| sha                  | string (length) | 40    | 40    |
| repo_link            | string (length) | 72    | 180   |
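For orientation, a minimal loading sketch, assuming the rows are published as a Hugging Face dataset; the dataset id below is a placeholder, not the real one:

```python
from datasets import load_dataset

# "org/dataset-name" is a placeholder -- substitute the actual dataset id.
ds = load_dataset("org/dataset-name", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["licenses"])
# The code fields are plain strings:
print(row["python_code"][:200])
```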
entry_point: Encoder

original_triton_code:
```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [h_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   h_ => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/xh/cxhi2oxtldw7gsvpajvcvh4iks7iujnefav4smobuv5savmjupdj.py
# Topologically Sorted Source Nodes: [mul, var, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
#   mul => mul
#   mul_1 => mul_1
#   var => exp
#   z => add
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.5), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %rand), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %mul_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask)
    tmp5 = tl.load(in_ptr2 + (x0), xmask)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [h_], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [log_var], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        # Topologically Sorted Source Nodes: [epsilon], Original ATen: [aten.rand_like]
        buf4 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, var, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
        triton_poi_fused_add_exp_mul_1.run(buf2, buf3, buf5, buf6, 256, grid=grid(256), stream=stream0)
    return (buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            buf5, primals_6, primals_4, buf7, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
from torch import nn

class Encoder(nn.Module):

    def __init__(self, input_dim, hidden_dim, latent_dim):
        super(Encoder, self).__init__()
        self.FC_input = nn.Linear(input_dim, hidden_dim)
        self.FC_mean = nn.Linear(hidden_dim, latent_dim)
        self.FC_var = nn.Linear(hidden_dim, latent_dim)
        self.training = True

    def forward(self, x):
        h_ = torch.relu(self.FC_input(x))
        mean = self.FC_mean(h_)
        log_var = self.FC_var(h_)
        var = torch.exp(0.5 * log_var)
        z = self.reparameterization(mean, var)
        return z, mean, log_var

    def reparameterization(self, mean, var):
        epsilon = torch.rand_like(var)
        z = mean + var * epsilon
        return z

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'input_dim': 4, 'hidden_dim': 4, 'latent_dim': 4}]
```
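Every row's `python_code` cell follows the same convention: `get_init_inputs()` supplies constructor arguments and `get_inputs()` supplies forward arguments. A minimal driver sketch (hypothetical, assuming the definitions above are in scope; the eager path needs no GPU):

```python
import torch

# Build the module from the row's own helper functions.
init_args, init_kwargs = get_init_inputs()
model = Encoder(*init_args, **init_kwargs)
z, mean, log_var = model(*get_inputs())
print(z.shape, mean.shape, log_var.shape)  # each torch.Size([4, 4, 4, 4])
```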
triton_code:

```python
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)

@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp5 = tl.load(in_ptr2 + x0, xmask)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_exp_mul_1[grid(256)](buf2, buf3, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
    return (buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            buf5, primals_6, primals_4, buf7)

class EncoderNew(nn.Module):

    def __init__(self, input_dim, hidden_dim, latent_dim):
        super(EncoderNew, self).__init__()
        self.FC_input = nn.Linear(input_dim, hidden_dim)
        self.FC_mean = nn.Linear(hidden_dim, latent_dim)
        self.FC_var = nn.Linear(hidden_dim, latent_dim)
        self.training = True

    def reparameterization(self, mean, var):
        epsilon = torch.rand_like(var)
        z = mean + var * epsilon
        return z

    def forward(self, input_0):
        primals_1 = self.FC_input.weight
        primals_2 = self.FC_input.bias
        primals_4 = self.FC_mean.weight
        primals_5 = self.FC_mean.bias
        primals_6 = self.FC_var.weight
        primals_7 = self.FC_var.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0], output[1], output[2]
```
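A hypothetical parity check between the eager and generated modules, assuming both classes above are importable and a CUDA device is available. Because the reparameterization draws a fresh `rand_like` epsilon, `z` is stochastic; only the deterministic outputs are expected to match:

```python
import torch

torch.manual_seed(0)
eager = Encoder(4, 4, 4).cuda()
compiled = EncoderNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the same weights

x = torch.rand(4, 4, 4, 4, device='cuda')
_, mean_e, logvar_e = eager(x)
_, mean_c, logvar_c = compiled(x)

# z is stochastic (independent epsilon draws), so compare mean and log_var only.
assert torch.allclose(mean_e, mean_c, atol=1e-6)
assert torch.allclose(logvar_e, logvar_c, atol=1e-6)
```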
repo_name: georgezefko/dtu_mlops
module_name: Encoder
synthetic: false
uuid: 10091
licenses: ["Apache-2.0"]
stars: 0
sha: 3b715bcb934d0c2827d89395823b7d4768faac97
repo_link: https://github.com/georgezefko/dtu_mlops/tree/3b715bcb934d0c2827d89395823b7d4768faac97

---
entry_point: BasicModel3

original_triton_code:
```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ur/curqs5ijaywea7eln4nsct7fk2aktgmcp3hydopsadkd44czucp4.py
# Topologically Sorted Source Nodes: [sub, relu_out1, relu_out2, sub_1, relu_2], Original ATen: [aten.sub, aten.relu]
# Source node to ATen node mapping:
#   relu_2 => relu_2
#   relu_out1 => relu
#   relu_out2 => relu_1
#   sub => sub
#   sub_1 => sub_1
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
#   %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg1_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %relu_1), kwargs = {})
#   %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
triton_poi_fused_relu_sub_0 = async_compile.triton('triton_poi_fused_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp5 = tl.load(in_ptr1 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp7 = tmp4 - tmp6
    tmp8 = triton_helpers.maximum(tmp3, tmp7)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [sub, relu_out1, relu_out2, sub_1, relu_2], Original ATen: [aten.sub, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicModel3(nn.Module):
    """
    Example model two from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input1, input2):
        relu_out1 = F.relu(input1 - 1)
        relu_out2 = F.relu(input2)
        return F.relu(relu_out1 - relu_out2)

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp5 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp7 = tmp4 - tmp6
    tmp8 = triton_helpers.maximum(tmp3, tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,

class BasicModel3New(nn.Module):
    """
    Example model two from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```
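Unlike the Encoder row, this model is deterministic, so eager and compiled outputs can be compared exactly. A hypothetical check, assuming both classes above are in scope and CUDA is available:

```python
import torch

x1 = torch.rand(4, 4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, 4, device='cuda')
out_eager = BasicModel3()(x1, x2)
out_triton = BasicModel3New()(x1, x2)
assert torch.allclose(out_eager, out_triton)  # same fused arithmetic, same result
```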
repo_name: aravipati12/captum
module_name: BasicModel3
synthetic: false
uuid: 10092
licenses: ["BSD-3-Clause"]
stars: 0
sha: ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
repo_link: https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f

---
entry_point: BasicModel2

original_triton_code:
```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ov/covtl3np3j4nh2pdhca34u6mah43w7eyge54o5nbnle2urh3r6dn.py
# Topologically Sorted Source Nodes: [relu_out1, sub, relu_out2, sub_1, relu_2], Original ATen: [aten.relu, aten.sub]
# Source node to ATen node mapping:
#   relu_2 => relu_2
#   relu_out1 => relu
#   relu_out2 => relu_1
#   sub => sub
#   sub_1 => sub_1
# Graph fragment:
#   %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, 1), kwargs = {})
#   %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg1_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %relu_1), kwargs = {})
#   %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
triton_poi_fused_relu_sub_0 = async_compile.triton('triton_poi_fused_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp5 = tl.load(in_ptr1 + (x0), xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 1.0
    tmp4 = tmp2 - tmp3
    tmp6 = triton_helpers.maximum(tmp1, tmp5)
    tmp7 = tmp4 - tmp6
    tmp8 = triton_helpers.maximum(tmp1, tmp7)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [relu_out1, sub, relu_out2, sub_1, relu_2], Original ATen: [aten.relu, aten.sub]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicModel2(nn.Module):
    """
    Example model one from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input1, input2):
        relu_out1 = F.relu(input1)
        relu_out2 = F.relu(input2)
        return F.relu(relu_out1 - 1 - relu_out2)

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```
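A quick scalar walk-through of the formula, as a hypothetical sanity check on the eager module (any input shape works for the eager path, unlike the shape-specialized compiled wrapper):

```python
import torch

m = BasicModel2()
x1 = torch.tensor([2.0, -1.0])
x2 = torch.tensor([0.5, 0.5])
# ReLU(ReLU(2) - 1 - ReLU(0.5))  = ReLU(0.5)  = 0.5
# ReLU(ReLU(-1) - 1 - ReLU(0.5)) = ReLU(-1.5) = 0.0
print(m(x1, x2))  # tensor([0.5000, 0.0000])
```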
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp5 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 1.0
    tmp4 = tmp2 - tmp3
    tmp6 = triton_helpers.maximum(tmp1, tmp5)
    tmp7 = tmp4 - tmp6
    tmp8 = triton_helpers.maximum(tmp1, tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,

class BasicModel2New(nn.Module):
    """
    Example model one from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```
repo_name: aravipati12/captum
module_name: BasicModel2
synthetic: false
uuid: 10093
licenses: ["BSD-3-Clause"]
stars: 0
sha: ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
repo_link: https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f

---
entry_point: BasicModel5_MultiArgs

original_triton_code:
```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/hw/chwghbhfuftf6dgnkuanyxfpmdrwayuzfezhrz5fqikqzqvy5sf6.py
# Topologically Sorted Source Nodes: [sub, relu, relu_out1, relu_out2, relu_out2_1, sub_1, relu_2], Original ATen: [aten.sub, aten.relu, aten.mul]
# Source node to ATen node mapping:
#   relu => relu
#   relu_2 => relu_2
#   relu_out1 => mul
#   relu_out2 => relu_1
#   relu_out2_1 => mul_1
#   sub => sub
#   sub_1 => sub_1
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %select), kwargs = {})
#   %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg2_1,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_1, %select_1), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
triton_poi_fused_mul_relu_sub_0 = async_compile.triton('triton_poi_fused_mul_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (x2), xmask)
    tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = tmp4 * tmp5
    tmp8 = triton_helpers.maximum(tmp3, tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = tmp6 - tmp10
    tmp12 = triton_helpers.maximum(tmp3, tmp11)
    tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [sub, relu, relu_out1, relu_out2, relu_out2_1, sub_1, relu_2], Original ATen: [aten.sub, aten.relu, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_relu_sub_0.run(arg0_1, arg1_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
        del arg2_1
    return (reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1, arg2_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicModel5_MultiArgs(nn.Module):
    """
    Slightly modified example model from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input1, input2, additional_input1, additional_input2=0):
        relu_out1 = F.relu(input1 - 1) * additional_input1[0]
        relu_out2 = F.relu(input2)
        relu_out2 = relu_out2 * additional_input1[1]
        return F.relu(relu_out1 - relu_out2)[:, additional_input2]

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_mul_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + x2, xmask)
    tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = tmp4 * tmp5
    tmp8 = triton_helpers.maximum(tmp3, tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = tmp6 - tmp10
    tmp12 = triton_helpers.maximum(tmp3, tmp11)
    tl.store(out_ptr0 + x2, tmp12, xmask)

def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_relu_sub_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0),

class BasicModel5_MultiArgsNew(nn.Module):
    """
    Slightly modified example model from the paper
    https://arxiv.org/pdf/1703.01365.pdf

    f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
```
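Worth noting: the compiled wrapper specializes `additional_input2` to its default of 0, so the trailing `[:, 0]` slice is expressed as a zero-copy `reinterpret_tensor` view over the (4, 4, 4, 4) result buffer, with strides (64, 4, 1) at offset 0. A hypothetical shape check, assuming the class above is in scope and CUDA is available:

```python
import torch

inputs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
out = BasicModel5_MultiArgsNew()(*inputs)
print(out.shape)  # torch.Size([4, 4, 4]) -- the [:, 0] slice is baked in
```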
repo_name: aravipati12/captum
module_name: BasicModel5_MultiArgs
synthetic: false
uuid: 10094
licenses: ["BSD-3-Clause"]
stars: 0
sha: ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
repo_link: https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f

---
entry_point: Attn

original_triton_code:
```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/nj/cnjqzm7hm3u6ggjfvpspnl6pii56jckgwol3ydau4qesjyoteutl.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %permute_1], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x2 = (xindex // 32)
    x1 = (xindex // 8) % 4
    x3 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + ((4*x2) + (16*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
#   energy => tanh
# Graph fragment:
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/i5/ci57psuuueutwfqpm57dmpddhnflxjjxpqzf6cwcsnd2zbemfstl.py
# Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
#   repeat_1 => repeat_1
# Graph fragment:
#   %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_5, [4, 1]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/lt/cltwbpokq7b7gvah2tjf27qlzw6vpmwfuzs3xfk7mhbxym753kvi.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/rr/crrmj7r54x5uk325xkhuskxp4m5prz3fpx53yc2st4o5pwbhq32p.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh]
        triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
        del primals_4
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat]
        triton_poi_fused_repeat_2.run(primals_5, buf3, 16, grid=grid(16), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
        buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_4.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
        del buf5
    return (reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0),
            reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf6,
            reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
import math import torch import torch.nn as nn import torch.nn.functional as F class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, encoder_outputs): """ :param hidden: previous hidden state of the decoder, in shape (layers*directions,B,H) :param encoder_outputs: encoder outputs from Encoder, in shape (T,B,H) :return attention energies in shape (B,T) """ max_len = encoder_outputs.size(0) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) encoder_outputs = encoder_outputs.transpose(0, 1) attn_energies = self.score(H, encoder_outputs) return F.softmax(attn_energies, dim=1).unsqueeze(1) def score(self, hidden, encoder_outputs): cat = torch.cat([hidden, encoder_outputs], 2) energy = torch.tanh(self.attn(cat)) energy = energy.transpose(2, 1) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'method': 4, 'hidden_size': 4}]
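A quick usage sketch for the eager Attn module above, using the shapes returned by get_inputs(); a minimal smoke test, assuming the class definition above is in scope (CPU is fine here, since this is the pure-PyTorch reference rather than the generated kernels):

import torch

# Hypothetical smoke test: hidden is (B, H) = (4, 4) and encoder_outputs is
# (T, B, H) = (4, 4, 4), matching get_inputs() above.
attn = Attn(method=4, hidden_size=4)
hidden = torch.rand(4, 4)
encoder_outputs = torch.rand(4, 4, 4)
weights = attn(hidden, encoder_outputs)
print(weights.shape)        # torch.Size([4, 1, 4]): (B, 1, T) weights
print(weights.sum(dim=-1))  # each row sums to 1 after the softmax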
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, 
primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0) class AttnNew(nn.Module): def __init__(self, method, hidden_size): super(AttnNew, self).__init__() self.method = method self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def score(self, hidden, encoder_outputs): cat = torch.cat([hidden, encoder_outputs], 2) energy = torch.tanh(self.attn(cat)) energy = energy.transpose(2, 1) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def forward(self, input_0, input_1): primals_4 = self.v primals_3 = self.attn.weight primals_5 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
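A minimal parity sketch between the eager Attn and the generated AttnNew, assuming both definitions are importable in one session and a CUDA device is available (the generated kernels are CUDA-only). Parameters are copied via state_dict so both paths see identical weights:

import torch

ref = Attn(method=4, hidden_size=4).cuda()
new = AttnNew(method=4, hidden_size=4).cuda()
new.load_state_dict(ref.state_dict())  # same attn.weight/bias and v
hidden = torch.rand(4, 4, device='cuda')
enc = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(ref(hidden, enc), new(hidden, enc), atol=1e-5)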
erytheis/HDSA-Dialog
Attn
false
10095
[ "MIT" ]
0
08fa6c583e51989f45201e232864ccb495fa823c
https://github.com/erytheis/HDSA-Dialog/tree/08fa6c583e51989f45201e232864ccb495fa823c
Feedback
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/y6/cy6g24sdqsjaqjfbvlsjwxjxnicovho2svumdpyu3a6q2ahveris.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] # Source node to ATen node mapping: # leaky_relu => gt, mul, view_3, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_23, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x4), tmp7, xmask) tl.store(out_ptr0 + (x4), tmp7, xmask) tl.store(out_ptr1 + (x4), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] # Source node to ATen node mapping: # linear_1 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gk/cgkxbks6h4eqrscppxnaorv4esw33shmgdrwqxjp2hxbln2y3baz.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] # Source node to ATen node mapping: # h => gt_1, mul_1, view_10, where_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_8, 0), kwargs = {}) # 
%mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, 0.2), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_8, %mul_1), kwargs = {}) # %view_10 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_9, [4, 4, 4, 4]), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_15, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_view_2 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_view_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x4), tmp7, xmask) tl.store(out_ptr1 + (x4), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0.run(buf1, primals_2, buf2, buf8, 256, grid=grid(256), stream=stream0) del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_view_2.run(buf5, primals_5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del primals_5 return (buf6, buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Feedback(nn.Module): def __init__(self, opt): super(Feedback, self).__init__() self.fc1 = nn.Linear(opt.ngh, opt.ngh) self.fc2 = nn.Linear(opt.ngh, opt.ngh) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, x): self.x1 = self.lrelu(self.fc1(x)) h = self.lrelu(self.fc2(self.x1)) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(ngh=4)}]
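_mock_config comes from paritybench's test helpers, which may not be installed; for a standalone smoke test of the eager Feedback module, any object with an ngh attribute will do. A sketch under that assumption:

import torch
from types import SimpleNamespace

opt = SimpleNamespace(ngh=4)  # stands in for _mock_config(ngh=4)
fb = Feedback(opt)
out = fb(torch.rand(4, 4, 4, 4))
print(out.shape)              # torch.Size([4, 4, 4, 4])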
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr1 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x4, tmp7, xmask) tl.store(out_ptr1 + x4, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0[grid(256)](buf1, primals_2, buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 
4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_view_2[grid(256)](buf5, primals_5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_5 return buf6, buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf3, buf7, primals_4, buf8 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class FeedbackNew(nn.Module): def __init__(self, opt): super(FeedbackNew, self).__init__() self.fc1 = nn.Linear(opt.ngh, opt.ngh) self.fc2 = nn.Linear(opt.ngh, opt.ngh) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
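Because weights_init randomizes the weights independently in each constructor, a parity check between Feedback and FeedbackNew has to copy state explicitly. A minimal sketch, assuming a CUDA device and both classes in scope:

import torch
from types import SimpleNamespace

opt = SimpleNamespace(ngh=4)
ref = Feedback(opt).cuda()
new = FeedbackNew(opt).cuda()
new.load_state_dict(ref.state_dict())  # undo the independent weights_init
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), new(x), atol=1e-6)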
e96031413/tfvaegan
Feedback
false
10096
[ "MIT" ]
0
4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
https://github.com/e96031413/tfvaegan/tree/4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
BasicModel6_MultiTensor
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/du/cdurbcnkyrxoijvuwl6ykrplldixgmls22pav5zfwcxxqn5aueuw.py # Topologically Sorted Source Nodes: [sub_1], Original ATen: [aten.rsub] # Source node to ATen node mapping: # sub_1 => sub_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select), kwargs = {}) triton_poi_fused_rsub_0 = async_compile.triton('triton_poi_fused_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tl.full([1], 0, tl.int32) 
tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp3 - tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1], Original ATen: [aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_rsub_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class BasicModel6_MultiTensor(nn.Module): def __init__(self) ->None: super().__init__() def forward(self, input1, input2): input = input1 + input2 return 1 - F.relu(1 - input)[:, 1] def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
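A worked identity for the forward above: 1 - relu(1 - x) clamps x from above at 1, so the model is equivalent to torch.clamp(input1 + input2, max=1)[:, 1]. The test values below are dyadic, so the comparison is bit-exact:

import torch

x = torch.tensor([-0.5, 0.25, 1.0, 2.5])
lhs = 1 - torch.relu(1 - x)
rhs = torch.clamp(x, max=1.0)
assert torch.equal(lhs, rhs)  # min(x, 1) elementwise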
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp3 - tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_rsub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class BasicModel6_MultiTensorNew(nn.Module): def __init__(self) ->None: super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
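The module is stateless, so a parity check between the eager and compiled versions needs no weight copying. A minimal sketch, assuming CUDA and both classes in scope:

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
ref = BasicModel6_MultiTensor()(a, b)
new = BasicModel6_MultiTensorNew()(a, b)
assert ref.shape == (4, 4, 4) and torch.allclose(ref, new)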
aravipati12/captum
BasicModel6_MultiTensor
false
10097
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
TinyCnn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/mz/cmzayskizyosepouf7bee2nxwrz22qdilo3yoc4dd5mvjqtrtqev.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 43200 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 3 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lj/cljbe3cfakv4arvl5oacq6wg4nes5pgazbasnjbukx6m352uhudy.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 10800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) % 30 x4 = (xindex // 900) x3 = (xindex // 2700) x5 = xindex % 2700 tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = 
tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + (2720*x3)), tmp6, xmask) tl.store(out_ptr1 + (x5 + (2816*x3)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ey/ceyh6jbt65oc6v44s64kl7q3amkcjurkcrbtrfc2dqaufyhgw3kv.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 33640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 841) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (3, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (3, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (10, 3, 2, 2), (12, 4, 2, 1)) assert_size_stride(primals_5, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 60, 60), (10800, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 3, 60, 60), (10848, 3616, 60, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, 
aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 43200, grid=grid(43200), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 30, 30), (2720, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 3, 30, 30), (2816, 900, 30, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 10800, grid=grid(10800), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 10, 29, 29), (8410, 841, 29, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 33640, grid=grid(33640), stream=stream0) del primals_5 return (reinterpret_tensor(buf5, (3364, 10), (10, 1), 0), primals_1, primals_3, primals_4, buf1, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((3, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 3, 2, 2), (12, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class TinyCnn(nn.Module): def __init__(self, feature_extraction=False) ->None: super().__init__() self.feature_extraction = feature_extraction self.conv1 = nn.Conv2d(3, 3, 5) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool2d(2, 2) if not self.feature_extraction: self.conv2 = nn.Conv2d(3, 10, 2) def forward(self, x): x = self.pool1(self.relu1(self.conv1(x))) if not self.feature_extraction: x = self.conv2(x) x = x.view(-1, 10) else: x = x.view(-1, 12) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
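Shape walk-through for the default path: the 5x5 valid conv takes 64 -> 60, the 2x2 max-pool takes 60 -> 30, and the 2x2 conv takes 30 -> 29, so view(-1, 10) flattens (4, 10, 29, 29) into 4*29*29 = 3364 rows. A quick check:

import torch

net = TinyCnn()                     # feature_extraction=False path
out = net(torch.rand(4, 3, 64, 64))
print(out.shape)                    # torch.Size([3364, 10])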
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 3 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 10800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x4 = xindex // 900 x3 = xindex // 2700 x5 = xindex % 2700 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + 2720 * x3), tmp6, xmask) tl.store(out_ptr1 + (x5 + 2816 * x3), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 33640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 841 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (3, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (10, 3, 2, 2), (12, 4, 2, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 60, 60), (10800, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 3, 60, 60), (10848, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(43200)](buf0, primals_2, buf1, 
43200, XBLOCK=512, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 30, 30), (2720, 900, 30, 1), torch .float32) buf3 = empty_strided_cuda((4, 3, 30, 30), (2816, 900, 30, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(10800)](buf1, buf2, buf3, 10800, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 10, 29, 29), (8410, 841, 29, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(33640)](buf5, primals_5, 33640, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return reinterpret_tensor(buf5, (3364, 10), (10, 1), 0 ), primals_1, primals_3, primals_4, buf1, buf2, buf3 class TinyCnnNew(nn.Module): def __init__(self, feature_extraction=False) ->None: super().__init__() self.feature_extraction = feature_extraction self.conv1 = nn.Conv2d(3, 3, 5) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool2d(2, 2) if not self.feature_extraction: self.conv2 = nn.Conv2d(3, 10, 2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
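Note that the traced graph bakes in the feature_extraction=False branch (conv2 and view(-1, 10) are hard-coded in call), so TinyCnnNew only reproduces that path. A parity sketch, assuming CUDA:

import torch

ref = TinyCnn().cuda()
new = TinyCnnNew().cuda()
new.load_state_dict(ref.state_dict())  # share conv weights and biases
x = torch.rand(4, 3, 64, 64, device='cuda')
assert torch.allclose(ref(x), new(x), atol=1e-5)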
aravipati12/captum
TinyCnn
false
10098
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
Lagrange
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/di/cdin4cn2ynf63iq6x2thid4wys2gmzaded4rvbg22vgxxyayfnrm.py # Topologically Sorted Source Nodes: [mul, sum_1, add, log, neg], Original ATen: [aten.mul, aten.sum, aten.add, aten.log, aten.neg] # Source node to ATen node mapping: # add => add # log => log # mul => mul # neg => neg # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) triton_poi_fused_add_log_mul_neg_sum_0 = async_compile.triton('triton_poi_fused_add_log_mul_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_mul_neg_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_log_mul_neg_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = -tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/52/c52sn6mfjn7szwdu5e2fleeg4kpdw7vl3wxpvimc5huyp46te3ma.py # Topologically Sorted Source Nodes: [mul, sum_1, add, log, neg, result, mean], Original ATen: [aten.mul, aten.sum, aten.add, aten.log, aten.neg, aten.mean] # Source node to ATen node mapping: # add => add # log => log # mean => mean # mul => mul # neg => neg # result => add_1 # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %arg2_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {}) triton_per_fused_add_log_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused_add_log_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r2), None) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1, add, log, neg], Original ATen: [aten.mul, aten.sum, aten.add, aten.log, aten.neg] stream0 = get_raw_stream(0) triton_poi_fused_add_log_mul_neg_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, sum_1, add, log, neg, result, mean], Original ATen: [aten.mul, aten.sum, aten.add, aten.log, aten.neg, aten.mean] triton_per_fused_add_log_mean_mul_neg_sum_1.run(buf2, buf0, arg2_1, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data def objective(x, h): return torch.log(1 + torch.sum(x * h, dim=1)) class Lagrange(nn.Module): def __init__(self): super(Lagrange, self).__init__() def forward(self, approx, dual, h): result = -objective(approx, h) + dual return torch.mean(result) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_log_mul_neg_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = -tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_per_fused_add_log_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_log_mul_neg_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_log_mean_mul_neg_sum_1[grid(1)](buf2, buf0, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, def objective(x, h): return torch.log(1 + torch.sum(x * h, dim=1)) class LagrangeNew(nn.Module): def __init__(self): super(LagrangeNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
goldenBill/Power_Control
Lagrange
false
10099
[ "MIT" ]
0
8218aaffe8d5c69da454f76ecdacce46340cb81c
https://github.com/goldenBill/Power_Control/tree/8218aaffe8d5c69da454f76ecdacce46340cb81c
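A minimal smoke test for the record above (my sketch, not part of the dataset; a CUDA device is assumed because call() pins device 0). Judging from the graph fragment, the traced placeholders follow first-use order: %mul consumes arg0_1 and arg1_1 (the approx * h product inside objective) and %add_1 adds arg2_1 (the dual term), so the compiled wrapper is fed (approx, h, dual) rather than the eager signature order (approx, dual, h).

import torch

if torch.cuda.is_available():
    approx, h, dual = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    out = LagrangeNew()(approx, h, dual)   # placeholder order: approx, h, dual
    ref = Lagrange()(approx, dual, h)      # eager signature: approx, dual, h
    # Both reduce to one scalar: mean(-log(1 + sum(approx * h, dim=1)) + dual).
    torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)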
down_shifted_conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/lk/clkvmfnc5ppj6ffcl2v3tvac6pbvz3y7mizzrdi65zokiejjhua3.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 5 x0 = xindex % 6 x2 = (xindex // 30) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-1) + x0 
tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2f/c2f6w3l6ahh7othacouiu2lpi7hja2epfoatgx6qgnjkgptctuad.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) triton_per_fused__weight_norm_interface_1 = async_compile.triton('triton_per_fused__weight_norm_interface_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (24*x0)), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + 
(r1 + (24*x0)), tmp9, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 480, grid=grid(480), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 2, 3), (24, 6, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_1.run(buf2, primals_3, primals_2, buf3, 4, 24, 
grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf5, buf3, primals_2, primals_3, buf0, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 2, 3), (24, 6, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn.utils import weight_norm as wn def down_shift(x, pad=None): xs = [int(y) for y in x.size()] x = x[:, :, :xs[2] - 1, :] pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad return pad(x) class down_shifted_conv2d(nn.Module): def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3), stride=(1, 1), shift_output_down=False, norm='weight_norm'): super(down_shifted_conv2d, self).__init__() assert norm in [None, 'batch_norm', 'weight_norm'] self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size, stride) self.shift_output_down = shift_output_down self.norm = norm self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int(( filter_size[1] - 1) / 2), filter_size[0] - 1, 0)) if norm == 'weight_norm': self.conv = wn(self.conv) elif norm == 'batch_norm': self.bn = nn.BatchNorm2d(num_filters_out) if shift_output_down: self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0, 0, 1, 0))) def forward(self, x): x = self.pad(x) x = self.conv(x) x = self.bn(x) if self.norm == 'batch_norm' else x return self.down_shift(x) if self.shift_output_down else x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_filters_in': 4, 'num_filters_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.utils import weight_norm as wn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 5 x0 = xindex % 6 x2 = xindex // 30 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -1 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 24 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 2, 3), (24, 6, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3, primals_2, buf3, 4, 24, XBLOCK=1, num_warps=2, num_stages=1) buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 
triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3 def down_shift(x, pad=None): xs = [int(y) for y in x.size()] x = x[:, :, :xs[2] - 1, :] pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad return pad(x) class down_shifted_conv2dNew(nn.Module): def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3), stride=(1, 1), shift_output_down=False, norm='weight_norm'): super(down_shifted_conv2dNew, self).__init__() assert norm in [None, 'batch_norm', 'weight_norm'] self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size, stride) self.shift_output_down = shift_output_down self.norm = norm self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int(( filter_size[1] - 1) / 2), filter_size[0] - 1, 0)) if norm == 'weight_norm': self.conv = wn(self.conv) elif norm == 'batch_norm': self.bn = nn.BatchNorm2d(num_filters_out) if shift_output_down: self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0, 0, 1, 0))) def forward(self, input_0): primals_4 = self.conv.bias primals_2 = self.conv.weight_g primals_3 = self.conv.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
elahekhodaie/PixelCnnPP
down_shifted_conv2d
false
10100
[ "MIT" ]
0
ab1e245ed8c24009364b1f891288eb1a526b0121
https://github.com/elahekhodaie/PixelCnnPP/tree/ab1e245ed8c24009364b1f891288eb1a526b0121
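A hypothetical usage sketch for the record above (assumes CUDA; the compiled call() covers only the default configuration, i.e. norm='weight_norm' and shift_output_down=False). The (1, 1, 1, 0) zero padding exactly offsets the (2, 3) valid convolution, so the spatial size is preserved.

import torch

if torch.cuda.is_available():
    model = down_shifted_conv2dNew(num_filters_in=4, num_filters_out=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # Padding grows x to (4, 4, 5, 6); the 2x3 kernel brings it back to 4x4.
    assert model(x).shape == (4, 4, 4, 4)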
TanhDeepLiftModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3r/c3reenux7tcoceopsipct2lm5rye6gyn75npseuo5kjlokodcceg.py # Topologically Sorted Source Nodes: [tanh, mul, sub, tanh_1, mul_1, add], Original ATen: [aten.tanh, aten.mul, aten.sub, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # sub => sub # tanh => tanh # tanh_1 => tanh_1 # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 1.5), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_sub_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sub_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sub_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sub_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp4 = tl.load(in_ptr1 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp5 = 1.5 tmp6 = tmp4 - tmp5 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp7 * tmp2 tmp9 = tmp3 + tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, mul, sub, tanh_1, mul_1, add], Original ATen: [aten.tanh, aten.mul, aten.sub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sub_tanh_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class TanhDeepLiftModel(nn.Module): """ Same as the ReLUDeepLiftModel, but with activations that can have negative outputs """ def __init__(self) ->None: super().__init__() self.tanh1 = nn.Tanh() self.tanh2 = nn.Tanh() def forward(self, x1, x2): return 2 * self.tanh1(x1) + 2 * self.tanh2(x2 - 1.5) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_sub_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp5 = 1.5 tmp6 = tmp4 - tmp5 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp7 * tmp2 tmp9 = tmp3 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sub_tanh_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TanhDeepLiftModelNew(nn.Module): """ Same as the ReLUDeepLiftModel, but with activations that can have negative outputs """ def __init__(self) ->None: super().__init__() self.tanh1 = nn.Tanh() self.tanh2 = nn.Tanh() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
aravipati12/captum
TanhDeepLiftModel
false
10101
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
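Since the fused kernel above is a pure elementwise evaluation of 2*tanh(x1) + 2*tanh(x2 - 1.5), the compiled wrapper can be checked directly against that expression. A minimal sketch, assuming a CUDA device:

import torch

if torch.cuda.is_available():
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    x2 = torch.rand(4, 4, 4, 4, device='cuda')
    out = TanhDeepLiftModelNew()(x1, x2)
    # libdevice.tanh should agree with eager torch.tanh to float32 tolerance.
    torch.testing.assert_close(out, 2 * torch.tanh(x1) + 2 * torch.tanh(x2 - 1.5))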
ReLUDeepLiftModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cl/cclwe4nsvbh5ilb5hx2qh2wzgdxpgpiir62mwjqixvvsntxf3w4w.py # Topologically Sorted Source Nodes: [relu, mul, sub, relu_1, mul_1, add], Original ATen: [aten.relu, aten.mul, aten.sub, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # relu => relu # relu_1 => relu_1 # sub => sub # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 1.5), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_relu_sub_0 = async_compile.triton('triton_poi_fused_add_mul_relu_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp5 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp6 = 1.5 tmp7 = tmp5 - tmp6 tmp8 = triton_helpers.maximum(tmp1, tmp7) tmp9 = tmp8 * tmp3 tmp10 = tmp4 + tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu, mul, sub, relu_1, mul_1, add], Original ATen: [aten.relu, aten.mul, aten.sub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_relu_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ReLUDeepLiftModel(nn.Module): """ https://www.youtube.com/watch?v=f_iAM0NPwnM """ def __init__(self) ->None: super().__init__() self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() def forward(self, x1, x2, x3=2): return 2 * self.relu1(x1) + x3 * self.relu2(x2 - 1.5) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp5 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp6 = 1.5 tmp7 = tmp5 - tmp6 tmp8 = triton_helpers.maximum(tmp1, tmp7) tmp9 = tmp8 * tmp3 tmp10 = tmp4 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class ReLUDeepLiftModelNew(nn.Module): """ https://www.youtube.com/watch?v=f_iAM0NPwnM """ def __init__(self) ->None: super().__init__() self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
aravipati12/captum
ReLUDeepLiftModel
false
10102
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
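One detail worth noting in the record above: the optional scalar x3 was specialized to its default value 2 at trace time (the kernel multiplies both branches by the constant 2.0), so the compiled wrapper accepts only the two tensor inputs. A small check against the eager expression, assuming CUDA:

import torch

if torch.cuda.is_available():
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    x2 = torch.rand(4, 4, 4, 4, device='cuda')
    out = ReLUDeepLiftModelNew()(x1, x2)
    # x3 is baked in as 2, matching the eager default argument.
    torch.testing.assert_close(out, 2 * torch.relu(x1) + 2 * torch.relu(x2 - 1.5))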
ODDetector
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex 
% 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] # Source node to ATen node mapping: # linear_1 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 
1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_7 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ODDetector(nn.Module): def __init__(self, input_dim, h_size, num_classes): super(ODDetector, self).__init__() self.relu = nn.ReLU(True) self.fc1 = nn.Linear(input_dim, h_size) self.fc2 = nn.Linear(h_size, h_size) self.classifier = nn.Linear(h_size, num_classes) def forward(self, x, center_loss=False): h = self.relu(self.fc1(x)) h = self.relu(self.fc2(h)) pred = self.classifier(h) return pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'h_size': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf4, primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0) del buf4 extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_7 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, primals_6, buf7, primals_4, buf8 class 
ODDetectorNew(nn.Module): def __init__(self, input_dim, h_size, num_classes): super(ODDetectorNew, self).__init__() self.relu = nn.ReLU(True) self.fc1 = nn.Linear(input_dim, h_size) self.fc2 = nn.Linear(h_size, h_size) self.classifier = nn.Linear(h_size, num_classes) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.classifier.weight primals_7 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
e96031413/tfvaegan
ODDetector
false
10103
[ "MIT" ]
0
4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
https://github.com/e96031413/tfvaegan/tree/4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
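A plausible parity test for the record above (my sketch; assumes CUDA): ODDetectorNew keeps the same submodule names as ODDetector, so the eager state dict loads directly and the two forward passes can be compared.

import torch

if torch.cuda.is_available():
    eager = ODDetector(input_dim=4, h_size=4, num_classes=4).cuda()
    fused = ODDetectorNew(input_dim=4, h_size=4, num_classes=4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # Both paths compute classifier(relu(fc2(relu(fc1(x))))) over the last dim.
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-5, atol=1e-5)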
LinearMaxPoolLinearModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/l7/cl7et626a3hygrp6632jwvtzl7kamkrqyyuwaki5lrfkro7ftlke.py # Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool1d => _low_memory_max_pool2d_with_offsets, getitem_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze_1, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, 
xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x0), tmp15, xmask) tl.store(out_ptr1 + (x0), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.int8) buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) # Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(buf0, buf1, buf2, 4, grid=grid(4), stream=stream0) buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (4, 1), (1, 0), 0), primals_3, out=buf3) return (buf3, primals_1, reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0), buf1, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LinearMaxPoolLinearModel(nn.Module): def __init__(self) ->None: super().__init__() self.lin1 = nn.Linear(4, 4, bias=False) self.lin1.weight = nn.Parameter(torch.eye(4, 4)) self.pool1 = nn.MaxPool1d(4) self.lin2 = nn.Linear(1, 1, bias=False) self.lin2.weight = nn.Parameter(torch.ones(1, 1)) def forward(self, x): x = x.unsqueeze(1) return self.lin2(self.pool1(self.lin1(x))[:, 0, :]) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x0, tmp15, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.int8) buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(4)](buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (4, 1), (1, 0), 0), primals_3, out=buf3) return buf3, primals_1, reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0), buf1, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), primals_3 class LinearMaxPoolLinearModelNew(nn.Module): def __init__(self) ->None: super().__init__() self.lin1 = nn.Linear(4, 4, bias=False) self.lin1.weight = nn.Parameter(torch.eye(4, 4)) self.pool1 = nn.MaxPool1d(4) self.lin2 = nn.Linear(1, 1, bias=False) self.lin2.weight = nn.Parameter(torch.ones(1, 1)) def forward(self, input_0): primals_1 = self.lin1.weight primals_3 = self.lin2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
aravipati12/captum
LinearMaxPoolLinearModel
false
10104
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
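A quick sanity check of the eager module in the record above (a minimal sketch; CPU-only, since the compiled call wrapper pins its buffers to cuda:0). With lin1 fixed to the 4x4 identity and lin2 fixed to ones(1, 1), the whole model reduces to a row-wise max over the input features:

import torch

model = LinearMaxPoolLinearModel()
x = torch.rand(4, 4)
# unsqueeze(1) -> identity linear -> MaxPool1d(4) over the 4 features -> ones(1, 1) linear
expected = x.max(dim=1, keepdim=True).values
assert torch.allclose(model(x), expected)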
NormLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/kr/ckrxqy227ju5oy4cidmyb35esvbq3qjjqbbpiqrdjo6j5hkwzhi4.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4.00000001), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.249999999375 tmp4 
= tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NormLayer(nn.Module): def __init__(self, mean, std, n=None, eps=1e-08) ->None: super().__init__() self.mean = mean self.std = std self.eps = eps def forward(self, x): return (x - self.mean) / (self.std + self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'mean': 4, 'std': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.249999999375 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormLayerNew(nn.Module): def __init__(self, mean, std, n=None, eps=1e-08) ->None: super().__init__() self.mean = mean self.std = std self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
aravipati12/captum
NormLayer
false
10105
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
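For the NormLayer record above, Inductor constant-folds (x - 4) / (4 + 1e-08) into a subtract and a single multiply: 1 / 4.00000001 is 0.249999999375, the tmp3 constant baked into triton_poi_fused_div_sub_0. A small check of that folding (a sketch; the eager lines run on CPU, the call(...) lines assume a CUDA device, since this record's wrapper allocates everything on cuda:0):

import torch

layer = NormLayer(mean=4, std=4)
x = torch.rand(4, 4, 4, 4)
# The kernel multiplies by the reciprocal instead of dividing.
assert torch.allclose(layer(x), (x - 4.0) * 0.249999999375)

# Driving this record's compiled wrapper directly (CUDA only):
x_gpu = torch.rand(4, 4, 4, 4, device='cuda')
out, = call([x_gpu])
assert torch.allclose(out, (x_gpu - 4.0) * 0.249999999375)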
BasicModel_ConvNet_One_Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yd/cyd55c65mxewrzonhexkzt7kvedl7miq5zljenj22e3cmx5nh7ux.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 30752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 3844) % 2 x0 = xindex % 3844 x3 = (xindex // 3844) tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (3872*x3)), tmp4, xmask) tl.store(out_ptr1 + (x0 + (3968*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ro/crodgwiht3rd3qdm6jeb6ah3gh7adez5xxahguzmhiypughkk6gt.py # Topologically Sorted Source Nodes: [conv2d, x, x_1], Original ATen: [aten.convolution, aten.relu, aten.view] # Source node to ATen node mapping: # conv2d => convolution # x => relu # x_1 => view # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu, [-1, 8]), kwargs = {}) triton_poi_fused_convolution_relu_view_1 = async_compile.triton('triton_poi_fused_convolution_relu_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 30752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + ((3872*(x0 // 3844)) + (x0 % 3844)), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yj/cyjz2ijb5syran7d5wcbcd5dx6qnsec2qssp45og23tmuoyekyir.py # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward] # Source 
node to ATen node mapping: # relu_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 15376 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1), torch.float32) buf6 = empty_strided_cuda((4, 2, 62, 62), (7936, 3968, 62, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: 
[aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf0, primals_2, buf1, buf6, 30752, grid=grid(30752), stream=stream0) del primals_2 buf2 = reinterpret_tensor(buf0, (3844, 8), (8, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x, x_1], Original ATen: [aten.convolution, aten.relu, aten.view] triton_poi_fused_convolution_relu_view_1.run(buf1, buf2, 30752, grid=grid(30752), stream=stream0) del buf1 buf3 = empty_strided_cuda((3844, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf3) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((3844, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf5, 15376, grid=grid(15376), stream=stream0) del primals_5 return (buf4, primals_1, primals_3, buf2, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from typing import Optional import torch.nn as nn from typing import no_type_check class BasicModel_ConvNet_One_Conv(nn.Module): def __init__(self, inplace: 'bool'=False) ->None: super().__init__() self.conv1 = nn.Conv2d(1, 2, 3, 1) self.relu1 = nn.ReLU(inplace=inplace) self.fc1 = nn.Linear(8, 4) self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3)) self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0])) self.fc1.weight = nn.Parameter(torch.cat([torch.ones(4, 5), -1 * torch.ones(4, 3)], dim=1)) self.fc1.bias = nn.Parameter(torch.zeros(4)) self.relu2 = nn.ReLU(inplace=inplace) @no_type_check def forward(self, x: 'Tensor', x2: 'Optional[Tensor]'=None): if x2 is not None: x = x + x2 x = self.relu1(self.conv1(x)) x = x.view(-1, 8) return self.relu2(self.fc1(x)) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 30752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 3844 % 2 x0 = xindex % 3844 x3 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 3872 * x3), tmp4, xmask) tl.store(out_ptr1 + (x0 + 3968 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 30752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3872 * (x0 // 3844) + x0 % 3844), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 15376 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1), torch.float32) buf6 = empty_strided_cuda((4, 2, 62, 62), (7936, 3968, 62, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(30752)]( buf0, primals_2, buf1, buf6, 30752, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = reinterpret_tensor(buf0, (3844, 8), (8, 1), 0) del buf0 triton_poi_fused_convolution_relu_view_1[grid(30752)](buf1, buf2, 30752, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((3844, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (8, 4), (1, 8 ), 0), out=buf3) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((3844, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(15376)](buf4, primals_5, buf5, 15376, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_5 return buf4, primals_1, primals_3, buf2, buf5, primals_4, buf6 class BasicModel_ConvNet_One_ConvNew(nn.Module): def __init__(self, inplace: 'bool'=False) ->None: super().__init__() self.conv1 = nn.Conv2d(1, 2, 3, 1) self.relu1 = nn.ReLU(inplace=inplace) self.fc1 = nn.Linear(8, 4) self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3)) self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0])) self.fc1.weight = nn.Parameter(torch.cat([torch.ones(4, 5), -1 * torch.ones(4, 3)], dim=1)) self.fc1.bias = nn.Parameter(torch.zeros(4)) self.relu2 = nn.ReLU(inplace=inplace) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
aravipati12/captum
BasicModel_ConvNet_One_Conv
false
10106
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
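The fixed parameters in the record above make an eager sanity check easy: every 3x3 patch of an input drawn from [0, 1] sums to at most 9, so the conv biases of -50.0 and -75.0 push conv1's pre-activations negative everywhere, relu1 zeroes them, and with fc1's bias at zero the final output is identically zero. A minimal sketch (CPU eager path only; the compiled call wrapper assumes cuda:0):

import torch

model = BasicModel_ConvNet_One_Conv()
x = torch.rand(4, 1, 64, 64)
out = model(x)
# 4 * 2 * 62 * 62 = 30752 conv activations, reshaped via view(-1, 8) into
# 3844 rows; fc1 maps them to (3844, 4), matching xnumel = 15376 = 3844 * 4
# in triton_poi_fused_relu_threshold_backward_2.
assert out.shape == (3844, 4)
assert torch.all(out == 0)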
ReduceDim
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div] # Source node to ATen node mapping: # x_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class ReduceDim(nn.Module): def __init__(self, input_dimension, output_dimension): super(ReduceDim, self).__init__() self.fc = nn.Linear(input_dimension, output_dimension) def forward(self, x): x = self.fc(x) x = F.normalize(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dimension': 4, 'output_dimension': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class ReduceDimNew(nn.Module): def __init__(self, input_dimension, output_dimension): super(ReduceDimNew, self).__init__() self.fc = nn.Linear(input_dimension, output_dimension) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dendisuhubdy/collaborative-experts
ReduceDim
false
10107
[ "MIT" ]
0
e6db63837537c054723ce00b73264101acc29d39
https://github.com/dendisuhubdy/collaborative-experts/tree/e6db63837537c054723ce00b73264101acc29d39
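F.normalize in the ReduceDim record above is an L2 normalization over dim=1 with eps=1e-12, and the fused kernel mirrors it literally: it squares and sums the four dim-1 elements, takes libdevice.sqrt, clamps from below with triton_helpers.maximum(tmp12, 1e-12), then divides. An eager check that the output has unit norm along dim 1 (a sketch; CPU, and it relies on the random fc output having non-negligible norm):

import torch

model = ReduceDim(input_dimension=4, output_dimension=4)
x = torch.rand(4, 4, 4, 4)
y = model(x)
# Every dim-1 slice of the output should have (near-)unit L2 norm.
assert torch.allclose(y.norm(dim=1), torch.ones(4, 4, 4), atol=1e-5)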
SigmoidDeepLiftModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3v/c3v7n6hzyrv5pn6uojl3hf6tko347a672spakigdzmqm7ebd4zwl.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + 
(x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2h/c2h7x7vqvl2ldlya5jpbhvac4kxygymipwfzvir2qf4zuwykft5n.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [lin1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf4, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [lin2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), 
(4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf3, 256, grid=grid(256), stream=stream0) return (buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SigmoidDeepLiftModel(nn.Module): """ Model architecture from: https://medium.com/coinmonks/create-a-neural-network-in -pytorch-and-make-your-life-simpler-ec5367895199 """ def __init__(self, num_in, num_hidden, num_out) ->None: super().__init__() self.num_in = num_in self.num_hidden = num_hidden self.num_out = num_out self.lin1 = nn.Linear(num_in, num_hidden, bias=False) self.lin2 = nn.Linear(num_hidden, num_out, bias=False) self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in)) self.lin2.weight = nn.Parameter(torch.ones(num_out, num_hidden)) self.relu1 = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input): lin1 = self.lin1(input) lin2 = self.lin2(self.relu1(lin1)) return self.sigmoid(lin2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in': 4, 'num_hidden': 4, 'num_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(256)](buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_3, buf4 class SigmoidDeepLiftModelNew(nn.Module): """ Model architecture from: https://medium.com/coinmonks/create-a-neural-network-in -pytorch-and-make-your-life-simpler-ec5367895199 """ def __init__(self, num_in, num_hidden, num_out) ->None: super().__init__() self.num_in = num_in self.num_hidden = num_hidden self.num_out = num_out self.lin1 = nn.Linear(num_in, num_hidden, bias=False) self.lin2 = nn.Linear(num_hidden, num_out, bias=False) self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in)) self.lin2.weight = nn.Parameter(torch.ones(num_out, num_hidden)) self.relu1 = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.lin1.weight primals_3 = self.lin2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
aravipati12/captum
SigmoidDeepLiftModel
false
10108
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
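Because both weight matrices in the SigmoidDeepLiftModel record above are all ones, the model has a closed form on non-negative inputs: every hidden unit equals x.sum(-1), the ReLU is a no-op, and lin2 sums num_hidden identical units before the sigmoid. A sketch of that check (CPU eager path; assumes inputs in [0, 1] so the ReLU never clips):

import torch

model = SigmoidDeepLiftModel(num_in=4, num_hidden=4, num_out=4)
x = torch.rand(4, 4, 4, 4)
# lin1 -> x.sum(-1) in each of the 4 hidden units; lin2 -> 4 * that sum.
expected = torch.sigmoid(4 * x.sum(dim=-1, keepdim=True)).expand_as(x)
assert torch.allclose(model(x), expected)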
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # out_1 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn


class Net(nn.Module):

    def __init__(self, input_size, hidden_size, num_distros):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.tanh = nn.Tanh()
        self.fc2 = nn.Linear(hidden_size, num_distros)

    def forward(self, x):
        out = self.fc1(x)
        out = self.tanh(out)
        out = self.fc2(out)
        return out

    def get_mixture_coef(self, x):
        # Detach and convert to NumPy first: the np.* calls below fail on a
        # tensor that still requires grad.
        y = self.forward(x).detach().cpu().numpy()
        out_pi, out_sigma, out_mu = np.split(y, 3)
        # Subtract the max before exponentiating for numerical stability.
        max_pi = np.amax(out_pi)
        out_pi = np.subtract(out_pi, max_pi)
        out_pi = np.exp(out_pi)
        # keepdims=True so the reciprocal row sums broadcast against out_pi
        # instead of raising a shape error.
        normalize_pi = np.reciprocal(np.sum(out_pi, axis=1, keepdims=True))
        out_pi = np.multiply(normalize_pi, out_pi)
        out_sigma = np.exp(out_sigma)
        return out_pi, out_sigma, out_mu


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'num_distros': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4


class NetNew(nn.Module):

    def __init__(self, input_size, hidden_size, num_distros):
        super(NetNew, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.tanh = nn.Tanh()
        self.fc2 = nn.Linear(hidden_size, num_distros)

    def get_mixture_coef(self, x):
        # Detach and convert to NumPy first: the np.* calls below fail on a
        # tensor that still requires grad.
        y = self.forward(x).detach().cpu().numpy()
        out_pi, out_sigma, out_mu = np.split(y, 3)
        # Subtract the max before exponentiating for numerical stability.
        max_pi = np.amax(out_pi)
        out_pi = np.subtract(out_pi, max_pi)
        out_pi = np.exp(out_pi)
        # keepdims=True so the reciprocal row sums broadcast against out_pi
        # instead of raising a shape error.
        normalize_pi = np.reciprocal(np.sum(out_pi, axis=1, keepdims=True))
        out_pi = np.multiply(normalize_pi, out_pi)
        out_sigma = np.exp(out_sigma)
        return out_pi, out_sigma, out_mu

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5])
        return output[0]
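# --- Added usage sketch (not from the source repo): a minimal sanity check
# that the Inductor-compiled wrapper (NetNew) reproduces the eager Net above,
# assuming both listings are importable in one scope. Requires a CUDA device,
# since call() pins every buffer to cuda:0.
import torch


def _check_net_equivalence():
    torch.manual_seed(0)
    eager = Net(input_size=4, hidden_size=4, num_distros=4).cuda()
    compiled = NetNew(input_size=4, hidden_size=4, num_distros=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share identical weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-5, atol=1e-5)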
gganssle/mixture-density-networks
Net
false
10,109
[ "Apache-2.0" ]
0
246f05d8a1dedd259232760a1b54ac5845c4b8f6
https://github.com/gganssle/mixture-density-networks/tree/246f05d8a1dedd259232760a1b54ac5845c4b8f6
Discriminator_D1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat] # Source node to ATen node mapping: # h => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5n/c5nsofekij6a7ap52g7x25mzkhruvzcsi6l2zpm7nkpnzba6k7fc.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu => gt, mul, where # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor, %mul), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, ), (1, )) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return (buf4, buf2, buf0, buf2, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Discriminator_D1(nn.Module): def __init__(self, opt): super(Discriminator_D1, self).__init__() self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh) self.fc2 = nn.Linear(opt.ndh, 1) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, x, att): h = torch.cat((x, att), 1) self.hidden = self.lrelu(self.fc1(h)) h = self.fc2(self.hidden) return h def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(resSize=4, attSize=4, ndh=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return buf4, buf2, buf0, buf2, primals_5 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Discriminator_D1New(nn.Module): def __init__(self, opt): super(Discriminator_D1New, self).__init__() self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh) self.fc2 = nn.Linear(opt.ndh, 1) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6]) return output[0]
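# --- Added usage sketch (not from the source repo): exercising the compiled
# Discriminator_D1New against the eager Discriminator_D1 above, with the same
# mock config that get_init_inputs uses. Assumes CUDA and that both classes
# and the _paritybench_helpers module are importable together.
import torch
from _paritybench_helpers import _mock_config


def _check_discriminator_equivalence():
    opt = _mock_config(resSize=4, attSize=4, ndh=4)
    torch.manual_seed(0)
    eager = Discriminator_D1(opt).cuda()
    compiled = Discriminator_D1New(opt).cuda()
    compiled.load_state_dict(eager.state_dict())
    x, att = torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda')
    torch.testing.assert_close(compiled(x, att), eager(x, att))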
e96031413/tfvaegan
Discriminator_D1
false
10,110
[ "MIT" ]
0
4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
https://github.com/e96031413/tfvaegan/tree/4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
SoftmaxDeepLiftModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [lin1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # lin1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [lin1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [lin2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [lin3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SoftmaxDeepLiftModel(nn.Module): """ Model architecture from: https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/ """ def __init__(self, num_in, num_hidden, num_out) ->None: super().__init__() self.num_in = num_in self.num_hidden = num_hidden self.num_out = num_out self.lin1 = nn.Linear(num_in, num_hidden) self.lin2 = nn.Linear(num_hidden, num_hidden) self.lin3 = nn.Linear(num_hidden, num_out) self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in)) self.lin2.weight = nn.Parameter(torch.ones(num_hidden, num_hidden)) self.lin3.weight = nn.Parameter(torch.ones(num_out, num_hidden)) self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() self.softmax = nn.Softmax(dim=1) def forward(self, input): lin1 = self.relu1(self.lin1(input)) lin2 = self.relu2(self.lin2(lin1)) lin3 = self.lin3(lin2) return self.softmax(lin3) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in': 4, 'num_hidden': 4, 'num_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class SoftmaxDeepLiftModelNew(nn.Module): """ Model architecture from: https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/ """ def __init__(self, num_in, num_hidden, num_out) ->None: super().__init__() self.num_in = num_in self.num_hidden = num_hidden self.num_out = num_out self.lin1 = nn.Linear(num_in, num_hidden) self.lin2 = nn.Linear(num_hidden, num_hidden) self.lin3 = nn.Linear(num_hidden, num_out) self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in)) self.lin2.weight = nn.Parameter(torch.ones(num_hidden, num_hidden)) self.lin3.weight = nn.Parameter(torch.ones(num_out, num_hidden)) self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_6 = self.lin3.weight primals_7 = self.lin3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
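# --- Added illustration (not from the source repo): the two softmax kernels
# above implement the standard numerically stable two-pass softmax over
# dim=1, split exactly as the ATen graph fragments describe. A plain PyTorch
# restatement of the same math:
import torch


def two_pass_softmax(z: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # pass 1 (triton_poi_fused__softmax_1): shift by the max, then exp
    e = torch.exp(z - z.amax(dim=dim, keepdim=True))
    # pass 2 (triton_poi_fused__softmax_2): normalize by the sum along dim
    return e / e.sum(dim=dim, keepdim=True)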
aravipati12/captum
SoftmaxDeepLiftModel
false
10,111
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
VarianceC
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/4f/c4fxn76mohifb2jk7b5g6obnv7yu74nc2psuptse7q6htokknm5w.py # Topologically Sorted Source Nodes: [mean_x, sub_x, mul, x], Original ATen: [aten.mean, aten.sub, aten.mul] # Source node to ATen node mapping: # mean_x => mean # mul => mul # sub_x => sub # x => mean_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [1], True), kwargs = {}) triton_poi_fused_mean_mul_sub_0 = async_compile.triton('triton_poi_fused_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_mean_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean_x, sub_x, mul, x], Original ATen: [aten.mean, aten.sub, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mean_mul_sub_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn as nn class VarianceC(nn.Module): def __init__(self): super(VarianceC, self).__init__() def forward(self, x): mean_x = torch.mean(x, dim=1, keepdim=True) sub_x = x.sub(mean_x) x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_mul_sub_0[grid(64)](arg0_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) del arg0_1 return buf0, class VarianceCNew(nn.Module): def __init__(self): super(VarianceCNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
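# --- Added reference (not from the source repo): the fused kernel above
# computes the biased variance E[(x - E[x])^2] over dim=1 in a single pass.
# An equivalent one-liner, useful for cross-checking the Triton output:
import torch


def variance_c_reference(x: torch.Tensor) -> torch.Tensor:
    # unbiased=False matches the mean-of-squared-deviations the kernel forms
    return torch.var(x, dim=1, unbiased=False, keepdim=True)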
caixin1998/pl-template
VarianceC
false
10,112
[ "BSD-3-Clause" ]
0
6918f0289ab2b32d107e5722617d25c9a683399c
https://github.com/caixin1998/pl-template/tree/6918f0289ab2b32d107e5722617d25c9a683399c
BasicModel4_MultiArgs
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/sj/csjzb4uw4rohankl5tidcjiumip66fptbo7ooho2r5f4uwu47tsq.py # Topologically Sorted Source Nodes: [sub, relu_out1, relu_out2, relu_out2_1, sub_1, relu_2], Original ATen: [aten.sub, aten.relu, aten.div] # Source node to ATen node mapping: # relu_2 => relu_2 # relu_out1 => relu # relu_out2 => relu_1 # relu_out2_1 => div # sub => sub # sub_1 => sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg1_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu_1, %arg2_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %div), kwargs = {}) # %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) triton_poi_fused_div_relu_sub_0 = async_compile.triton('triton_poi_fused_div_relu_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp5 = tl.load(in_ptr1 + (x0), xmask) tmp7 = tl.load(in_ptr2 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp4 - tmp8 tmp10 = triton_helpers.maximum(tmp3, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, relu_out1, relu_out2, relu_out2_1, sub_1, relu_2], Original ATen: [aten.sub, aten.relu, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_relu_sub_0.run(arg0_1, arg1_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicModel4_MultiArgs(nn.Module):
    """
        Slightly modified example model from the paper
        https://arxiv.org/pdf/1703.01365.pdf
        f(x1, x2, x3) = ReLU(ReLU(x1 - 1) - ReLU(x2) / x3)
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input1, input2, additional_input1, additional_input2=0):
        relu_out1 = F.relu(input1 - 1)
        relu_out2 = F.relu(input2)
        relu_out2 = relu_out2.div(additional_input1)
        return F.relu(relu_out1 - relu_out2)[:, additional_input2]


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp5 = tl.load(in_ptr1 + x0, xmask)
    tmp7 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp8 = tmp6 / tmp7
    tmp9 = tmp4 - tmp8
    tmp10 = triton_helpers.maximum(tmp3, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_relu_sub_0[grid(256)](arg0_1, arg1_1, arg2_1,
            buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0),


class BasicModel4_MultiArgsNew(nn.Module):
    """
        Slightly modified example model from the paper
        https://arxiv.org/pdf/1703.01365.pdf
        f(x1, x2, x3) = ReLU(ReLU(x1 - 1) - ReLU(x2) / x3)
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
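# --- Added usage sketch (not from the source repo): cross-checking the fused
# kernel against the eager formula. Note the wrapper returns the
# additional_input2-th slice (index 0) along dim 1 as a strided view via
# reinterpret_tensor rather than a copy. Assumes CUDA.
import torch
import torch.nn.functional as F


def _check_basic_model4():
    x1, x2, x3 = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    compiled = BasicModel4_MultiArgsNew()(x1, x2, x3)
    eager = F.relu(F.relu(x1 - 1) - F.relu(x2) / x3)[:, 0]
    torch.testing.assert_close(compiled, eager)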
aravipati12/captum
BasicModel4_MultiArgs
false
10,113
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
SpatialGather_Module
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/iv/civr7hz7pwb7nd5q352sqsjvxezkx6m6jnyztaygkt2ugewh5ejx.py # Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # probs_1 => div, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + (16*x0)), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_per_fused__softmax_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [probs_1, ocr_context], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg1_1 del buf2 return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
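A note on the kernel above: the mul-by-1 pairs in the graph come from folding self.scale * probs with the default scale of 1, and the remainder is a numerically stable softmax over the flattened spatial axis. A minimal eager sketch of the same computation, with illustrative names:

import torch

x = torch.rand(4, 4, 16)                    # (batch, classes, H*W); the kernel runs a 16 x 16 persistent reduction
m = x.amax(dim=2, keepdim=True)             # amax_default: subtract the per-row max for stability
p = torch.exp(x - m)                        # exp of the shifted logits
p = p / p.sum(dim=2, keepdim=True)          # sum_1 / div: normalize over the spatial axis
assert torch.allclose(p, torch.softmax(x, dim=2), atol=1e-6)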
import torch import torch.nn as nn import torch.nn.functional as F import torch._utils class SpatialGather_Module(nn.Module): """ Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. Output: The correlation of every class map with every feature map shape = [n, num_feats, num_classes, 1] """ def __init__(self, cls_num=0, scale=1): super(SpatialGather_Module, self).__init__() self.cls_num = cls_num self.scale = scale def forward(self, feats, probs): batch_size, c, _, _ = probs.size() probs = probs.view(batch_size, c, -1) feats = feats.view(batch_size, feats.size(1), -1) feats = feats.permute(0, 2, 1) probs = F.softmax(self.scale * probs, dim=2) ocr_context = torch.matmul(probs, feats) ocr_context = ocr_context.permute(0, 2, 1).unsqueeze(3) return ocr_context def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
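As a sanity check on the module above, the soft-weighted gather is just a spatial softmax followed by a batched contraction; a small CPU sketch (shapes follow get_inputs, and the names w and ref are illustrative):

import torch
import torch.nn.functional as F

m = SpatialGather_Module()
feats, probs = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = m(feats, probs)                                  # (n, c_feats, num_classes, 1)
w = F.softmax(probs.flatten(2), dim=2)                 # (n, k, h*w)
ref = torch.einsum('nkl,ncl->nck', w, feats.flatten(2)).unsqueeze(3)
assert out.shape == (4, 4, 4, 1) and torch.allclose(out, ref, atol=1e-6)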
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK= 1, num_warps=2, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg1_1 del buf2 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0), class SpatialGather_ModuleNew(nn.Module): """ Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. Output: The correlation of every class map with every feature map shape = [n, num_feats, num_classes, 1] """ def __init__(self, cls_num=0, scale=1): super(SpatialGather_ModuleNew, self).__init__() self.cls_num = cls_num self.scale = scale def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
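A minimal check of the compiled wrapper against the eager module, assuming a CUDA device (the kernels are compiled for cuda:0). Note that call() softmaxes arg0_1 and reads arg1_1 through a transposed view, so the probability map goes in the first slot:

import torch

if torch.cuda.is_available():
    feats = torch.rand(4, 4, 4, 4, device='cuda')
    probs = torch.rand(4, 4, 4, 4, device='cuda')
    ref = SpatialGather_Module()(feats, probs)
    out = SpatialGather_ModuleNew()(probs, feats)   # arg0_1 is the softmaxed operand
    assert torch.allclose(out, ref, atol=1e-5)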
garylidd/semantic-segmentation
SpatialGather_Module
false
10,114
[ "BSD-3-Clause" ]
0
64ae675076bea12ab994e7ae88d719a413e9c484
https://github.com/garylidd/semantic-segmentation/tree/64ae675076bea12ab994e7ae88d719a413e9c484
MultiRelu
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, 
arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
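For reference, the single pointwise kernel above is launched twice, once per input, and simply computes max(0, x) over the flattened 256-element buffer; an eager sketch:

import torch

a = torch.randn(4, 4, 4, 4)
flat = a.reshape(-1)                        # xnumel = 256 = 4*4*4*4: the kernel walks the flat index
out = torch.maximum(torch.zeros_like(flat), flat).reshape_as(a)
assert torch.equal(out, torch.relu(a))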
import torch from torch import Tensor from typing import Tuple import torch.nn as nn from typing import no_type_check class MultiRelu(nn.Module): def __init__(self, inplace: 'bool'=False) ->None: super().__init__() self.relu1 = nn.ReLU(inplace=inplace) self.relu2 = nn.ReLU(inplace=inplace) @no_type_check def forward(self, arg1: 'Tensor', arg2: 'Tensor') ->Tuple[Tensor, Tensor]: return self.relu1(arg1), self.relu2(arg2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
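A quick usage sketch for the module above; the two outputs are independent ReLUs of the two inputs:

import torch
import torch.nn.functional as F

m = MultiRelu()
x, y = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
a, b = m(x, y)
assert torch.equal(a, F.relu(x)) and torch.equal(b, F.relu(y))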
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_relu_0[grid(256)](arg1_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 return buf0, buf1 class MultiReluNew(nn.Module): def __init__(self, inplace: 'bool'=False) ->None: super().__init__() self.relu1 = nn.ReLU(inplace=inplace) self.relu2 = nn.ReLU(inplace=inplace) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
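A minimal equivalence check for the compiled wrapper, assuming a CUDA device:

import torch

if torch.cuda.is_available():
    x = torch.randn(4, 4, 4, 4, device='cuda')
    y = torch.randn(4, 4, 4, 4, device='cuda')
    a, b = MultiReluNew()(x, y)
    assert torch.equal(a, torch.relu(x)) and torch.equal(b, torch.relu(y))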
aravipati12/captum
MultiRelu
false
10,115
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
AttDec
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wn/cwnu6ncturya7giec3q2cikfd3y3bkgzl4ibxp3ub3fpg5fssvsj.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu => gt, mul, where # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ro/croeu53p6tbxtp6xjj4kgyxrozjl2ozleivfmextpoqt2lvk7ggz.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.div] # Source node to ATen node mapping: # h_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%addmm_1, %expand), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) return (buf3, buf1, primals_1, buf1, buf2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
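The second kernel above, triton_poi_fused_div_1, divides every element of a row by the square root of that row's sum of squares, i.e. an epsilon-free L2 row normalization; an eager sketch:

import torch
import torch.nn.functional as F

h = torch.randn(4, 4)
norm = h.pow(2).sum(dim=1, keepdim=True).sqrt()   # tmp11/tmp12 in the kernel
assert torch.allclose(h / norm, F.normalize(h, p=2, dim=1, eps=0.0))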
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class AttDec(nn.Module): def __init__(self, opt, attSize): super(AttDec, self).__init__() self.embedSz = 0 self.fc1 = nn.Linear(opt.resSize + self.embedSz, opt.ngh) self.fc3 = nn.Linear(opt.ngh, attSize) self.lrelu = nn.LeakyReLU(0.2, True) self.hidden = None self.sigmoid = None self.apply(weights_init) def forward(self, feat, att=None): h = feat if self.embedSz > 0: assert att is not None, 'Conditional Decoder requires attribute input' h = torch.cat((feat, att), 1) self.hidden = self.lrelu(self.fc1(h)) h = self.fc3(self.hidden) if self.sigmoid is not None: h = self.sigmoid(h) else: h = h / h.pow(2).sum(1).sqrt().unsqueeze(1).expand(h.size(0), h .size(1)) self.out = h return h def getLayersOutDet(self): return self.hidden.detach() def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(resSize=4, ngh=4), 'attSize': 4}]
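A small CPU sketch of the module above, reusing the harness's _mock_config exactly as get_init_inputs does; it checks that the final division leaves unit-norm rows:

from _paritybench_helpers import _mock_config
import torch

opt = _mock_config(resSize=4, ngh=4)
m = AttDec(opt, attSize=4)
out = m(torch.rand(4, 4))
assert out.shape == (4, 4)
assert torch.allclose(out.norm(dim=1), torch.ones(4), atol=1e-5)  # rows are unit length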
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, buf1, primals_1, buf1, buf2, primals_4 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class AttDecNew(nn.Module): def __init__(self, opt, attSize): super(AttDecNew, self).__init__() self.embedSz = 0 self.fc1 = nn.Linear(opt.resSize + self.embedSz, opt.ngh) self.fc3 = nn.Linear(opt.ngh, attSize) self.lrelu = nn.LeakyReLU(0.2, True) self.hidden = None self.sigmoid = None self.apply(weights_init) def getLayersOutDet(self): return self.hidden.detach() def forward(self, input_0): primals_1 = 
input_0 primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc3.weight primals_5 = self.fc3.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) self.hidden = output[1] return output[0]
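A minimal check of the compiled module against the eager one, assuming a CUDA device and the same _mock_config harness; weights are synced explicitly because both constructors call weights_init:

from _paritybench_helpers import _mock_config
import torch

if torch.cuda.is_available():
    opt = _mock_config(resSize=4, ngh=4)
    ref, new = AttDec(opt, 4).cuda(), AttDecNew(opt, 4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, device='cuda')
    assert torch.allclose(new(x), ref(x), atol=1e-5)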
e96031413/tfvaegan
AttDec
false
10,116
[ "MIT" ]
0
4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
https://github.com/e96031413/tfvaegan/tree/4d0512c6ce98155b9e8ba37fbcf90d43cd5bbe90
ScoringFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ps/cpsypcj54vlvwxescfz5tnn62zwx7pnm2flnavcb4wuyhw2ysjyl.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.sigmoid, aten.sigmoid_backward] # Source node to ATen node mapping: # x => convolution # x_1 => sigmoid # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {}) triton_poi_fused_convolution_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_convolution_sigmoid_sigmoid_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_sigmoid_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.sigmoid, aten.sigmoid_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_sigmoid_sigmoid_backward_0.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 16), (16, 1), 0), primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
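The fused kernel above computes the sigmoid and, in the same pass, the s * (1 - s) factor that the sigmoid's backward will consume; a short autograd sketch confirming the identity (z stands in for the conv output plus bias):

import torch

z = torch.randn(64, requires_grad=True)     # xnumel = 64 flattened elements
s = torch.sigmoid(z)                        # tmp4, written back into buf1
s.sum().backward()
assert torch.allclose(z.grad, (s * (1 - s)).detach())   # tmp7, precomputed into buf2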
import torch import torch.utils.data import torch import torch.nn as nn class Conv2dAct(nn.Module): def __init__(self, in_channels, out_channels, ksize=1, activation='relu'): super(Conv2dAct, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, ksize) if activation == 'sigmoid': self.act = nn.Sigmoid() elif activation == 'relu': self.act = nn.ReLU() elif activation == 'tanh': self.act = nn.Tanh() def forward(self, x): x = self.conv(x) x = self.act(x) return x class VarianceC(nn.Module): def __init__(self): super(VarianceC, self).__init__() def forward(self, x): mean_x = torch.mean(x, dim=1, keepdim=True) sub_x = x.sub(mean_x) x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True) return x class ScoringFunction(nn.Module): def __init__(self, in_channels, var=False): super(ScoringFunction, self).__init__() if var: self.reduce_channel = VarianceC() else: self.reduce_channel = Conv2dAct(in_channels, 1, 1, 'sigmoid') def forward(self, x): x = self.reduce_channel(x) x = x.view(x.size(0), -1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
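A quick shape sketch for the two branches of the module above:

import torch

x = torch.rand(4, 4, 4, 4)
conv_path = ScoringFunction(in_channels=4)            # 1x1 conv + sigmoid, one learned map
var_path = ScoringFunction(in_channels=4, var=True)   # parameter-free channel variance
assert conv_path(x).shape == (4, 16)
assert var_path(x).shape == (4, 16)                   # both reduce to one channel, then flatten 4x4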
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_sigmoid_sigmoid_backward_0[grid(64)](buf1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 16), (16, 1), 0 ), primals_1, primals_3, buf2 class Conv2dAct(nn.Module): def __init__(self, in_channels, out_channels, ksize=1, activation='relu'): super(Conv2dAct, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, ksize) if activation == 'sigmoid': self.act = nn.Sigmoid() elif activation == 'relu': self.act = nn.ReLU() elif activation == 'tanh': self.act = nn.Tanh() def forward(self, x): x = self.conv(x) x = self.act(x) return x class VarianceC(nn.Module): def __init__(self): super(VarianceC, self).__init__() def forward(self, x): mean_x = torch.mean(x, dim=1, keepdim=True) sub_x = x.sub(mean_x) x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True) return x class ScoringFunctionNew(nn.Module): def __init__(self, in_channels, var=False): super(ScoringFunctionNew, self).__init__() if var: self.reduce_channel = VarianceC() else: self.reduce_channel = Conv2dAct(in_channels, 1, 1, 'sigmoid') def forward(self, input_0): primals_1 = self.reduce_channel.conv.weight primals_2 = self.reduce_channel.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
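A minimal equivalence check for the compiled wrapper, assuming a CUDA device:

import torch

if torch.cuda.is_available():
    ref = ScoringFunction(4).cuda()
    new = ScoringFunctionNew(4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(new(x), ref(x), atol=1e-6)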
caixin1998/pl-template
ScoringFunction
false
10,117
[ "BSD-3-Clause" ]
0
6918f0289ab2b32d107e5722617d25c9a683399c
https://github.com/caixin1998/pl-template/tree/6918f0289ab2b32d107e5722617d25c9a683399c
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xl/cxlr2xte6c7jd75mdwb54w5umgamgrdcqmn23zeuxiqyn62chliv.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 116160 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 1936) % 15 x0 = xindex % 1936 x4 = (xindex // 1936) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (1952*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qq/cqqyj7bzg2tuugs76qxqaau5ht6la2vyotf56k3rxacwkb3cwmzu.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 29040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 22 x1 = (xindex // 22) % 22 x4 = (xindex // 484) x3 = (xindex // 7260) x5 = xindex % 7260 tmp0 = tl.load(in_ptr0 + ((2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (44 + (2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (45 + (2*x0) + (88*x1) + (1952*x4)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = 
tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + (7264*x3)), tmp6, xmask) tl.store(out_ptr1 + (x5 + (7296*x3)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/m7/cm7jzmc45yt3dzgau4tc3j2kpbaq45lmf3o2otbw7ro2znjnx7bd.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 38880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 324) % 30 x2 = (xindex // 9720) x4 = xindex % 9720 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + (9728*x2)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cb/ccb4agz3nlvl6ebpdootpbhnzmbldcltol5al74rlft7nhsz5oas.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 
1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 9720 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 9 x1 = (xindex // 9) % 270 x2 = (xindex // 2430) x3 = xindex % 2430 tmp0 = tl.load(in_ptr0 + ((2*x0) + (36*x1) + (9728*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (36*x1) + (9728*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (18 + (2*x0) + (36*x1) + (9728*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (19 + (2*x0) + (36*x1) + (9728*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x3 + (2432*x2)), tmp15, xmask) tl.store(out_ptr1 + (x3 + (2432*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7k/c7k5thbqokkbssdemkgd4j3bmg3f5m4fuena5jfu3mvt3vppva6o.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (15, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (15, ), (1, )) assert_size_stride(primals_3, (4, 1, 48, 48), (2304, 2304, 48, 1)) assert_size_stride(primals_4, (30, 15, 5, 5), (375, 25, 5, 1)) assert_size_stride(primals_5, (30, ), (1, )) assert_size_stride(primals_6, (300, 2430), (2430, 1)) assert_size_stride(primals_7, (300, ), (1, )) assert_size_stride(primals_8, (2, 300), (300, 1)) assert_size_stride(primals_9, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 15, 44, 44), (29040, 1936, 44, 1)) buf1 = empty_strided_cuda((4, 15, 44, 44), (29280, 1952, 44, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 116160, grid=grid(116160), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 15, 22, 22), (7264, 484, 22, 1), torch.float32) buf3 = empty_strided_cuda((4, 15, 22, 22), (7296, 484, 22, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 29040, grid=grid(29040), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = 
extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 30, 18, 18), (9720, 324, 18, 1)) buf5 = empty_strided_cuda((4, 30, 18, 18), (9728, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 38880, grid=grid(38880), stream=stream0) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 30, 9, 9), (2432, 81, 9, 1), torch.int8) buf7 = empty_strided_cuda((4, 30, 9, 9), (2432, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 9720, grid=grid(9720), stream=stream0) buf8 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 2430), (2432, 1), 0), reinterpret_tensor(primals_6, (2430, 300), (1, 2430), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 1200, grid=grid(1200), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (300, 2), (1, 300), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 2430), (2432, 1), 0), buf9, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((15, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((15, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 48, 48), (2304, 2304, 48, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((30, 15, 5, 5), (375, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((300, 2430), (2430, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 15, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(15, 30, 5) self.fc1 = nn.Linear(30 * 9 * 9, 300) self.fc2 = nn.Linear(300, 2) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 30 * 9 * 9) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 48, 48])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 116160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1936 % 15 x0 = xindex % 1936 x4 = xindex // 1936 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 1952 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 29040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 22 x1 = xindex // 22 % 22 x4 = xindex // 484 x3 = xindex // 7260 x5 = xindex % 7260 tmp0 = tl.load(in_ptr0 + (2 * x0 + 88 * x1 + 1952 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 88 * x1 + 1952 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (44 + 2 * x0 + 88 * x1 + 1952 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (45 + 2 * x0 + 88 * x1 + 1952 * x4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + 7264 * x3), tmp6, xmask) tl.store(out_ptr1 + (x5 + 7296 * x3), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 324 % 30 x2 = xindex // 9720 x4 = xindex % 9720 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 9728 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 9720 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 9 x1 = xindex // 9 % 270 x2 = xindex // 2430 x3 = xindex % 2430 tmp0 = tl.load(in_ptr0 + (2 * x0 + 36 * x1 + 9728 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 36 * x1 + 9728 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (18 + 2 * x0 + 36 * x1 + 9728 * x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (19 + 2 * x0 + 36 * x1 + 9728 * x2), xmask, 
eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x3 + 2432 * x2), tmp15, xmask) tl.store(out_ptr1 + (x3 + 2432 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (15, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (15,), (1,)) assert_size_stride(primals_3, (4, 1, 48, 48), (2304, 2304, 48, 1)) assert_size_stride(primals_4, (30, 15, 5, 5), (375, 25, 5, 1)) assert_size_stride(primals_5, (30,), (1,)) assert_size_stride(primals_6, (300, 2430), (2430, 1)) assert_size_stride(primals_7, (300,), (1,)) assert_size_stride(primals_8, (2, 300), (300, 1)) assert_size_stride(primals_9, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 15, 44, 44), (29040, 1936, 44, 1)) buf1 = empty_strided_cuda((4, 15, 44, 44), (29280, 1952, 44, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(116160)](buf0, primals_2, buf1, 116160, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 15, 22, 22), (7264, 484, 22, 1), torch.float32) buf3 = empty_strided_cuda((4, 15, 22, 22), (7296, 484, 22, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(29040)](buf1, buf2, buf3, 29040, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 30, 18, 18), (9720, 324, 18, 1)) buf5 = empty_strided_cuda((4, 30, 18, 18), (9728, 324, 18, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(38880)](buf4, primals_5, buf5, 38880, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 30, 9, 9), (2432, 81, 9, 1), torch.int8) buf7 = empty_strided_cuda((4, 30, 9, 9), (2432, 81, 9, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_3[grid(9720)](buf5, buf6, buf7, 9720, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 2430), (2432, 1), 0), reinterpret_tensor(primals_6, (2430, 300), (1, 2430), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(1200)](buf9, primals_7, 1200, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (300, 2), (1, 300), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 2430), (2432, 1), 0), buf9, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(1, 15, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(15, 30, 5) self.fc1 = nn.Linear(30 * 9 * 9, 300) self.fc2 = nn.Linear(300, 2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
gg4u/cnc_2017
Net
false
10118
[ "MIT" ]
0
1a5c52c3207ba131139214d14a2161af2db80a5c
https://github.com/gg4u/cnc_2017/tree/1a5c52c3207ba131139214d14a2161af2db80a5c
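A minimal smoke test for the record above — a sketch, not part of the original dataset row; it assumes the Net and NetNew classes shown above are in scope and a CUDA device is available. Because both classes declare identical parameters (conv1, conv2, fc1, fc2), the eager module's state dict loads directly into the Inductor wrapper, and the two should agree on the same input:

import torch

# Hypothetical check: share weights between the eager module and the
# Inductor-generated wrapper, then compare outputs on one random batch.
torch.manual_seed(0)
eager = Net().cuda().eval()
compiled = NetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # same parameter names/shapes
x = torch.rand(4, 1, 48, 48, device='cuda')   # matches get_inputs()
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
print(torch.allclose(ref, out, atol=1e-5))  # expected: True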
BasicCNN1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3r/c3rslv2exiiurkywxt4oxifqli4zlmoxh26mfj7b2xmxbacdj7n6.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cb/ccbdveqsgunhla4cfhtfjdkif3utkipupej6kcqiiji3bjat2x63.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # 
Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nl/cnlxxnmf5ygykjzvjljgvug3jvcu7du2i7h7ssfgcz5bdrzqhq5k.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) % 32 x2 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (64*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2080 + x0 + (64*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/na/cnaiponhu6kavyqptxmbfaxdb2osqwlqk74kcqgtnst5s3wwic5o.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4x/c4xqpnjemnncshp3uobexba3gggcnvtfyqm77kedjl5hqdw3frxf.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 16 x2 = (xindex // 1024) x3 = xindex tmp0 = 
tl.load(in_ptr0 + (x0 + (128*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (128*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + (128*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sl/csldasgiybuk75pltndkezbj76bkzandkkdq65rbg7u7qjxpoaag.py # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uf/cufauwupgjdrp6mauwivuddmfomag5gobvtyuvy22h7zpdtrudh5.py # 
Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = (yindex // 8) y5 = yindex y4 = (yindex // 64) y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2048 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2176 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (64*x2) 
+ (8192*y4)), tmp16, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4z/c4zkjrymdmkix2ew2ul2mfnu6lkblf6zlcrmopfi6cbx5ypowtyn.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_10 = async_compile.triton('triton_poi_fused_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 216 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (216, 2048), (2048, 1)) assert_size_stride(primals_9, (216, ), (1, )) assert_size_stride(primals_10, (10, 216), (216, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) 
triton_poi_fused_0.run(primals_1, buf0, 96, 9, grid=grid(96, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf5, primals_2, 524288, grid=grid(524288), stream=stream0) del primals_2 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32) buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf5, buf6, buf7, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf9, primals_5, 262144, grid=grid(262144), stream=stream0) del primals_5 buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf9, buf10, buf11, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf13, primals_7, 131072, grid=grid(131072), stream=stream0) del primals_7 buf14 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_9.run(buf13, buf14, buf15, 256, 128, grid=grid(256, 128), stream=stream0) buf16 = empty_strided_cuda((16, 216), (216, 1), torch.float32) # 
Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 216), (1, 2048), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_10.run(buf17, primals_9, 3456, grid=grid(3456), stream=stream0) del primals_9 buf18 = empty_strided_cuda((16, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf17, reinterpret_tensor(primals_10, (216, 10), (1, 216), 0), alpha=1, beta=1, out=buf18) del primals_11 return (buf18, buf0, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), buf17, primals_10, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((216, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((216, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 216), (216, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class BasicCNN1(nn.Module): def __init__(self): super().__init__() self.layer_names = ['conv1', 'conv2', 'conv3', 'fc1', 'output_layer'] self.conv1 = nn.Conv2d(3, 32, 3, padding=1) self.conv2 = nn.Conv2d(32, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(4 * 4 * 128, 216) self.output_layer = nn.Linear(216, 10) self.dropout = nn.Dropout(0.2) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = x.view(-1, 4 * 4 * 128) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.output_layer(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 % 32 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 64 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2080 + x0 + 64 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 16 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, 
None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = yindex // 8 y5 = yindex y4 = yindex // 64 y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2048 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2176 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 64 * x2 + 8192 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 3456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 216 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (216, 2048), (2048, 1)) assert_size_stride(primals_9, (216,), (1,)) assert_size_stride(primals_10, (10, 216), (216, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 
4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(524288)](buf5, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32) buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(131072)](buf5, buf6, buf7, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(262144)](buf9, primals_5, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(65536)](buf9, buf10, buf11, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_8[grid(131072)](buf13, primals_7, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf14 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_9[grid(256, 128)](buf13, buf14, buf15, 256, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((16, 216), (216, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 216), (1, 2048), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_10[grid(3456)](buf17, primals_9, 3456, XBLOCK =256, num_warps=4, num_stages=1) del primals_9 buf18 = empty_strided_cuda((16, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf17, reinterpret_tensor( primals_10, (216, 10), (1, 216), 0), alpha=1, beta=1, out=buf18) del primals_11 return (buf18, buf0, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor(buf15, (16, 2048), (2048, 1 ), 0), buf17, primals_10, primals_8) class BasicCNN1New(nn.Module): def __init__(self): super().__init__() self.layer_names = ['conv1', 'conv2', 'conv3', 'fc1', 'output_layer'] self.conv1 = nn.Conv2d(3, 32, 3, padding=1) self.conv2 = nn.Conv2d(32, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(4 * 4 * 128, 216) self.output_layer = nn.Linear(216, 10) self.dropout = nn.Dropout(0.2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.output_layer.weight primals_11 = self.output_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
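All of the fused convolution-plus-ReLU kernels above follow one pattern: the convolution itself runs through extern_kernels.convolution with bias=None, and a small elementwise Triton kernel then adds the per-channel bias and applies ReLU in place. A minimal, self-contained sketch of that pattern (the names bias_relu_kernel and bias_relu_ are illustrative, not part of the generated code; it assumes a channels-last buffer so that flat_index % C recovers the channel, matching the x0 = xindex % 128 indexing above):

import torch
import triton
import triton.language as tl

@triton.jit
def bias_relu_kernel(x_ptr, bias_ptr, n_elements, C: tl.constexpr, BLOCK: tl.constexpr):
    # One flat 1-D tile per program instance, exactly like the fused kernels above.
    offsets = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    # Channels-last layout: flat index % C is the channel index.
    b = tl.load(bias_ptr + offsets % C, mask=mask, eviction_policy='evict_last')
    y = tl.maximum(x + b, 0.0)  # bias add + ReLU fused into one pass
    tl.store(x_ptr + offsets, y, mask=mask)

def bias_relu_(out: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # out: result of a bias-free convolution in channels-last layout.
    n = out.numel()
    grid = (triton.cdiv(n, 1024),)
    bias_relu_kernel[grid](out, bias, n, C=bias.numel(), BLOCK=1024)
    return out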
fnc11/CosDefence
BasicCNN1
false
10,119
[ "MIT" ]
0
94f451b7d4b36cb3b9fcc85098dae242f311532b
https://github.com/fnc11/CosDefence/tree/94f451b7d4b36cb3b9fcc85098dae242f311532b
DisConvModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/pl/cpl2phcxgwq27h7hrkttckk2rwsgsjtseizody7iq5gpui6mr7xd.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/o7/co7q4vm5trvbqdcumxlbkclncooaexuslaaazlkr55noaxme4mfi.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) 
''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xs/cxsybwlktadgfr5i5ocek54wnwiwvwjfehkwokwt2npfoitrjcek.py # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_4 => convolution_2 # x_5 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lr/clrgts2iohcmp3svgdcrrk4uszvars7l4nnlqq72v6xk4x7pzlju.py # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_6 => convolution_3 # x_7 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = 
call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (8, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (16, 8, 5, 5), (200, 25, 5, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (16, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_9, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(2, 
2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, 32, grid=grid(32), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 1, 1), (16, 1, 1, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf5, primals_7, 64, grid=grid(64), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1)) buf7 = buf6; del buf6 # reuse buf8 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf7, primals_9, buf8, 64, grid=grid(64), stream=stream0) del primals_9 return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 8, 5, 5), (200, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
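In the launches above, grid(64) together with XBLOCK=64 resolves to ceil(xnumel / XBLOCK) one-dimensional program instances, here a single block. A hypothetical helper (grid_1d is not an Inductor API) showing just that arithmetic:

def grid_1d(xnumel: int, xblock: int) -> tuple:
    """Ceiling division, as performed when resolving grid(xnumel) against XBLOCK."""
    return (-(-xnumel // xblock),)

assert grid_1d(64, 64) == (1,)
assert grid_1d(32, 32) == (1,)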
import torch
import torch.utils.data
import torch.nn as nn
# Aliases referenced below when weight_norm='sn' / 'wn'; without these imports
# those branches would raise NameError.
from torch.nn.utils import spectral_norm as spectral_norm_fn
from torch.nn.utils import weight_norm as weight_norm_fn


def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0,
             rate=1, activation='lrelu'):
    return Conv2dBlock(input_dim, output_dim, kernel_size, stride,
                       conv_padding=padding, dilation=rate,
                       activation=activation)


class Conv2dBlock(nn.Module):

    def __init__(self, input_dim, output_dim, kernel_size, stride, padding=0,
                 conv_padding=0, dilation=1, weight_norm='none', norm='none',
                 activation='relu', pad_type='zero', transpose=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        elif pad_type == 'none':
            self.pad = None
        else:
            assert 0, 'Unsupported padding type: {}'.format(pad_type)
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if weight_norm == 'sn':
            self.weight_norm = spectral_norm_fn
        elif weight_norm == 'wn':
            self.weight_norm = weight_norm_fn
        elif weight_norm == 'none':
            self.weight_norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(weight_norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if transpose:
            self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size,
                                           stride, padding=conv_padding,
                                           output_padding=conv_padding,
                                           dilation=dilation,
                                           bias=self.use_bias)
        else:
            self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride,
                                  padding=conv_padding, dilation=dilation,
                                  bias=self.use_bias)
        if self.weight_norm:
            self.conv = self.weight_norm(self.conv)

    def forward(self, x):
        if self.pad:
            x = self.conv(self.pad(x))
        else:
            x = self.conv(x)
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x


class DisConvModule(nn.Module):

    def __init__(self, input_dim, cnum):
        super(DisConvModule, self).__init__()
        self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2)
        self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2)
        self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2)
        self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'cnum': 4}]
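With the 4x4 test inputs from get_inputs(), every dis_conv layer (kernel 5, stride 2, padding 2) maps a spatial size H to floor((H + 4 - 5) / 2) + 1, giving 4 -> 2 -> 1 -> 1 -> 1; these are exactly the shapes the assert_size_stride checks in the compiled call() below enforce. A quick sketch of the arithmetic:

def conv_out(h: int, k: int = 5, s: int = 2, p: int = 2) -> int:
    # Standard convolution output-size formula.
    return (h + 2 * p - k) // s + 1

sizes = [4]
for _ in range(4):
    sizes.append(conv_out(sizes[-1]))
assert sizes == [4, 2, 1, 1, 1]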
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8, 5, 5), (200, 25, 5, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_9, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) 
triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(32)](buf3, primals_5, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 1, 1), (16, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(64) ](buf7, primals_9, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8) def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0, rate=1, activation='lrelu'): return Conv2dBlock(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation) class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. 
use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) def forward(self, x): if self.pad: x = self.conv(self.pad(x)) else: x = self.conv(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class DisConvModuleNew(nn.Module): def __init__(self, input_dim, cnum): super(DisConvModuleNew, self).__init__() self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2) self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2) self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2) self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2) def forward(self, input_0): primals_2 = self.conv1.conv.weight primals_3 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_6 = self.conv3.conv.weight primals_7 = self.conv3.conv.bias primals_8 = self.conv4.conv.weight primals_9 = self.conv4.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
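The final fused kernel above stores both the LeakyReLU output and the boolean mask (y > 0) that the leaky_relu backward pass reuses. An eager-mode reference for what triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 computes per element, assuming the usual (N, C, H, W) bias broadcast:

import torch

def leaky_relu_with_mask(conv_out: torch.Tensor, bias: torch.Tensor):
    z = conv_out + bias.view(1, -1, 1, 1)  # per-channel bias add
    y = torch.where(z > 0, z, 0.2 * z)     # LeakyReLU(negative_slope=0.2)
    return y, y > 0                        # activation and saved backward mask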
caixin1998/pl-template
DisConvModule
false
10,120
[ "BSD-3-Clause" ]
0
6918f0289ab2b32d107e5722617d25c9a683399c
https://github.com/caixin1998/pl-template/tree/6918f0289ab2b32d107e5722617d25c9a683399c
BasicModel_ConvNet_MaxPool3d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/oq/coqgjzgrzub35c4izbofdscqaryljafkxqir6ozro7547giqmpg6.py # Topologically Sorted Source Nodes: [conv3d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1906624 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x2 = (xindex // 238328) % 2 x0 = xindex % 3844 x5 = (xindex // 3844) tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3872*x5)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qg/cqgdgx6btoeiqhkbtgu3pvqpke3rmcspt2z2gwfwcyvb5jjt5upf.py # Topologically Sorted Source Nodes: [conv3d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 390224 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 24389) % 4 x0 = xindex % 24389 x4 = (xindex // 24389) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (24416*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yk/cyk5bvk6vo3q5e3i77a57v3ypz4y6o6qt775lytgeooyezduf5tp.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 87808 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ww/cwwxdr6e3ruwusfymvqb6ti24bqsbo2po2ar4a4tqers4pjgvq6o.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_3 = async_compile.triton('triton_per_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 10976 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (10*x0)), tmp11, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (2, 1, 3, 3, 3), (27, 27, 9, 3, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (4, 2, 3, 3, 3), (54, 27, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8, ), (1, )) assert_size_stride(primals_8, (10, 8), (8, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62, 62, 62), (476656, 238328, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 2, 62, 62, 62), (480128, 240064, 3872, 62, 1), torch.float32) # Topologically Sorted Source Nodes: [conv3d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 1906624, grid=grid(1906624), stream=stream0) del buf0 del primals_2 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool3d_with_indices] buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2, 2], [2, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 # Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 
0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 29, 29, 29), (97556, 24389, 841, 29, 1)) buf6 = empty_strided_cuda((4, 4, 29, 29, 29), (97664, 24416, 841, 29, 1), torch.float32) # Topologically Sorted Source Nodes: [conv3d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf5, primals_5, buf6, 390224, grid=grid(390224), stream=stream0) del buf5 del primals_5 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool3d_with_indices] buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2, 2], [2, 2, 2]) buf8 = buf7[0] buf9 = buf7[1] del buf7 buf10 = empty_strided_cuda((10976, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf8, (10976, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf11, primals_7, 87808, grid=grid(87808), stream=stream0) del primals_7 buf12 = empty_strided_cuda((10976, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (8, 10), (1, 8), 0), alpha=1, beta=1, out=buf12) del primals_9 buf15 = empty_strided_cuda((10976, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_3.run(buf12, buf15, 10976, 10, grid=grid(10976), stream=stream0) del buf12 return (buf15, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6, buf9, reinterpret_tensor(buf8, (10976, 4), (4, 1), 0), buf11, buf15, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 1, 3, 3, 3), (27, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2, 3, 3, 3), (54, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BasicModel_ConvNet_MaxPool3d(nn.Module):
    """Same as above, but with the MaxPool1d replaced with a MaxPool3d.

    This is useful because the MaxPool modules behave differently to other
    modules from the perspective of the DeepLift Attributions.
    """

    def __init__(self) -> None:
        super().__init__()
        self.conv1 = nn.Conv3d(1, 2, 3)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool3d(2)
        self.conv2 = nn.Conv3d(2, 4, 3)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool3d(2)
        self.fc1 = nn.Linear(4, 8)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(8, 10)
        self.softmax = nn.Softmax(dim=1)
        self.fc1.weight = nn.Parameter(torch.ones(8, 4))
        self.fc2.weight = nn.Parameter(torch.ones(10, 8))

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.pool1(x)
        x = self.relu2(self.conv2(x))
        x = self.pool2(x)
        x = x.view(-1, 4)
        x = self.relu3(self.fc1(x))
        x = self.fc2(x)
        return self.softmax(x)


def get_inputs():
    return [torch.rand([4, 1, 64, 64, 64])]


def get_init_inputs():
    return [[], {}]
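The 10976 rows that appear throughout the compiled code come from x.view(-1, 4): the spatial size goes 64 -> 62 (conv1, kernel 3) -> 31 (pool1) -> 29 (conv2) -> 14 (pool2), so the pooled tensor is (4, 4, 14, 14, 14) with 43904 elements, i.e. 43904 // 4 = 10976 rows of width 4. A sketch of that bookkeeping:

def rows_after_view(batch: int = 4, ch_out: int = 4, d: int = 64) -> int:
    d = d - 2   # conv1: kernel 3, no padding
    d = d // 2  # pool1: MaxPool3d(2)
    d = d - 2   # conv2: kernel 3, no padding
    d = d // 2  # pool2: MaxPool3d(2)
    return batch * ch_out * d ** 3 // 4  # x.view(-1, 4)

assert rows_after_view() == 10976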
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1906624 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x2 = xindex // 238328 % 2 x0 = xindex % 3844 x5 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x5), tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 390224 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 24389 % 4 x0 = xindex % 24389 x4 = xindex // 24389 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 24416 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 87808 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 10976 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (2, 1, 3, 3, 3), (27, 27, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (4, 2, 3, 3, 3), (54, 27, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (10, 8), (8, 1)) 
assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62, 62, 62), (476656, 238328, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 2, 62, 62, 62), (480128, 240064, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1906624)](buf0, primals_2, buf1, 1906624, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2, 2], [2, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 29, 29, 29), (97556, 24389, 841, 29, 1) ) buf6 = empty_strided_cuda((4, 4, 29, 29, 29), (97664, 24416, 841, 29, 1), torch.float32) triton_poi_fused_convolution_relu_1[grid(390224)](buf5, primals_5, buf6, 390224, XBLOCK=512, num_warps=8, num_stages=1) del buf5 del primals_5 buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2, 2], [2, 2, 2]) buf8 = buf7[0] buf9 = buf7[1] del buf7 buf10 = empty_strided_cuda((10976, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (10976, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_2[grid(87808)](buf11, primals_7, 87808, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf12 = empty_strided_cuda((10976, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (8, 10), (1, 8), 0), alpha=1, beta=1, out=buf12) del primals_9 buf15 = empty_strided_cuda((10976, 10), (10, 1), torch.float32) triton_per_fused__softmax_3[grid(10976)](buf12, buf15, 10976, 10, XBLOCK=32, num_warps=4, num_stages=1) del buf12 return (buf15, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6, buf9, reinterpret_tensor(buf8, (10976, 4), (4, 1), 0), buf11, buf15, primals_8, primals_6) class BasicModel_ConvNet_MaxPool3dNew(nn.Module): """Same as above, but with the MaxPool1d replaced with a MaxPool3d. This is useful because the MaxPool modules behave differently to other modules from the perspective of the DeepLift Attributions """ def __init__(self) ->None: super().__init__() self.conv1 = nn.Conv3d(1, 2, 3) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool3d(2) self.conv2 = nn.Conv3d(2, 4, 3) self.relu2 = nn.ReLU() self.pool2 = nn.MaxPool3d(2) self.fc1 = nn.Linear(4, 8) self.relu3 = nn.ReLU() self.fc2 = nn.Linear(8, 10) self.softmax = nn.Softmax(dim=1) self.fc1.weight = nn.Parameter(torch.ones(8, 4)) self.fc2.weight = nn.Parameter(torch.ones(10, 8)) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
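triton_per_fused__softmax_3 above is the standard numerically stable row softmax: subtract the row max, exponentiate, then normalize by the row sum. An eager-mode reference, assuming rows along dim=1:

import torch

def row_softmax(x: torch.Tensor) -> torch.Tensor:
    m = x.max(dim=1, keepdim=True).values  # amax
    e = (x - m).exp()                      # sub + exp
    return e / e.sum(dim=1, keepdim=True)  # sum + div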
aravipati12/captum
BasicModel_ConvNet_MaxPool3d
false
10,121
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
SequentialCNNNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7e/c7emkemmywdizbc2dk22jgylyayrytgas3pkynkv2rcugfx3fbgp.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/ko/ckolze3b2rkhboj4tifvnwzo2lkguxrisl6edphahj3tsblduqdj.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 1024], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (1024*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (3072*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/52/c5276jy76qbq3iyizea75fwcetvvtf5twplgnzcooj45tx54swnf.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/up/cuppmbhehjhgagl7rthdsjbpgj2vlygxvpseble37orq4f3jjifs.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 200704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gs/cgszexmp33fmtqpfvwisguxkofvtt5aagh7glp6ns3etzhldua2y.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_3 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) % 14 x2 = (xindex // 896) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (3584*x2)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (3584*x2)), xmask) tmp3 = tl.load(in_ptr0 + (1792 + x0 + (128*x1) + (3584*x2)), xmask) tmp5 = tl.load(in_ptr0 + (1856 + x0 + (128*x1) + (3584*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c5/cc5blmfahkzenag6gn4lp747l72lk7l7cktgzanqbberxnmov4gz.py # Topologically Sorted Source Nodes: [input_4, input_5], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_4 => convolution_1 # 
input_5 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kl/cklb2l37lrkhdd4ime2yzpxsvp5xrsnnhzfbnyhkn3kxgs5tlfpp.py # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_6 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[128, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 100 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 5 y1 = (yindex // 5) y5 = yindex y4 = (yindex // 25) y6 = yindex % 25 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1280 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1408 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (25*x2) + (3200*y4)), tmp16, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (1024, 3200), (3200, 1)) assert_size_stride(primals_7, (1024, ), (1, )) assert_size_stride(primals_8, (84, 1024), (1024, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (50, 84), (84, 1)) assert_size_stride(primals_11, (50, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] 
stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 25, grid=grid(192, 25), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 32, 32), (3072, 1, 96, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 1024, grid=grid(12, 1024), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 8192, 25, grid=grid(8192, 25), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 28, 28), (50176, 1, 1792, 64)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf4, primals_2, 200704, grid=grid(200704), stream=stream0) del primals_2 buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32) buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_4.run(buf4, buf5, buf6, 50176, grid=grid(50176), stream=stream0) # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [input_4, input_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf8, primals_5, 51200, grid=grid(51200), stream=stream0) del primals_5 buf9 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8) buf10 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_6.run(buf8, buf9, buf10, 100, 128, grid=grid(100, 128), stream=stream0) buf11 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (1024, 84), (1, 1024), 0), alpha=1, beta=1, out=buf12) del primals_9 buf13 = empty_strided_cuda((4, 50), (50, 1), torch.float32) # Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(primals_10, (84, 50), (1, 84), 0), alpha=1, beta=1, out=buf13) del primals_11 return (buf13, buf0, buf1, buf2, buf4, buf5, buf6, buf8, buf9, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), buf11, buf12, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import 
print_performance primals_1 = rand_strided((64, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1024, 3200), (3200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((50, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
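The three unnamed layout kernels at the top of this listing (triton_poi_fused_0/_1/_2) do no arithmetic: they repack the NCHW-contiguous weights and input into channels-last memory before the extern convolution runs. The strides Inductor allocates for buf0, buf1 and buf2 are exactly torch.channels_last strides, which a short standalone sketch (plain PyTorch, nothing from this listing required) can confirm:

# Sketch: the strides of buf0/buf1/buf2 above are the channels-last strides
# of the same shapes. Values are untouched; only the memory layout changes.
import torch

for shape, expected in [((64, 3, 5, 5), (75, 1, 15, 3)),
                        ((4, 3, 32, 32), (3072, 1, 96, 3)),
                        ((128, 64, 5, 5), (1600, 1, 320, 64))]:
    t = torch.randn(shape)                                  # NCHW-contiguous
    t_cl = t.contiguous(memory_format=torch.channels_last)  # repacked copy
    assert t_cl.stride() == expected
    torch.testing.assert_close(t_cl, t)                     # same values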
import torch
import torch.nn as nn


class SequentialCNNNet(nn.Module):

    def __init__(self):
        super(SequentialCNNNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(64, 128, 5)
        self.fc1 = nn.Linear(128 * 5 * 5, 1024)
        self.fc2 = nn.Linear(1024, 84)
        self.fc3 = nn.Linear(84, 50)
        self.features = nn.Sequential(self.conv1, nn.ReLU(), self.pool,
            self.conv2, nn.ReLU(), self.pool)
        self.classifier = nn.Sequential(self.fc1, self.fc2, self.fc3)

    def forward(self, x):
        x = self.features(x)
        x = x.view(-1, 128 * 5 * 5)
        x = self.classifier(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {}]
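Each 5x5 valid convolution above shrinks the spatial size by 4 and each 2x2 pool halves it (32 -> 28 -> 14 -> 10 -> 5), which is where fc1's input width 128 * 5 * 5 = 3200 comes from. A quick shape walk-through using only the class above:

# Shape walk-through; mirrors get_inputs().
import torch

net = SequentialCNNNet()
x = torch.rand(4, 3, 32, 32)
feats = net.features(x)
print(feats.shape)   # torch.Size([4, 128, 5, 5]) -> flattened to (4, 3200)
print(net(x).shape)  # torch.Size([4, 50])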
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 3072 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 14 x2 = xindex // 896 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3584 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3584 * x2), xmask) tmp3 = tl.load(in_ptr0 + (1792 + x0 + 128 * x1 + 3584 * x2), xmask) tmp5 = tl.load(in_ptr0 + (1856 + x0 + 128 * x1 + 3584 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, 
tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 100 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 5 y1 = yindex // 5 y5 = yindex y4 = yindex // 25 y6 = yindex % 25 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1280 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1408 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 25 * x2 + 3200 * y4), tmp16, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (1024, 3200), (3200, 1)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (84, 1024), (1024, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (50, 84), (84, 1)) assert_size_stride(primals_11, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 25)](primals_1, buf0, 192, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 32, 32), (3072, 1, 96, 3), torch. 
float32) triton_poi_fused_1[grid(12, 1024)](primals_3, buf1, 12, 1024, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_2[grid(8192, 25)](primals_4, buf2, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 28, 28), (50176, 1, 1792, 64)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_3[grid(200704)](buf4, primals_2, 200704, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32) buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_4[grid(50176)](buf4, buf5, buf6, 50176, XBLOCK=512, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_5[grid(51200)](buf8, primals_5, 51200, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8) buf10 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_6[grid(100, 128)](buf8, buf9, buf10, 100, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (1024, 84), (1, 1024), 0), alpha=1, beta=1, out=buf12) del primals_9 buf13 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.addmm(primals_11, buf12, reinterpret_tensor( primals_10, (84, 50), (1, 84), 0), alpha=1, beta=1, out=buf13) del primals_11 return (buf13, buf0, buf1, buf2, buf4, buf5, buf6, buf8, buf9, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), buf11, buf12, primals_10, primals_8, primals_6) class SequentialCNNNetNew(nn.Module): def __init__(self): super(SequentialCNNNetNew, self).__init__() self.conv1 = nn.Conv2d(3, 64, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(64, 128, 5) self.fc1 = nn.Linear(128 * 5 * 5, 1024) self.fc2 = nn.Linear(1024, 84) self.fc3 = nn.Linear(84, 50) self.features = nn.Sequential(self.conv1, nn.ReLU(), self.pool, self.conv2, nn.ReLU(), self.pool) self.classifier = nn.Sequential(self.fc1, self.fc2, self.fc3) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
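Because SequentialCNNNetNew registers the same submodules under the same names as the eager class, their state dicts are interchangeable, and the compiled call() should reproduce the eager forward up to floating-point reassociation. A parity sketch (assumes a CUDA device, since call() pins every buffer to cuda:0):

# Parity sketch: compiled wrapper vs. eager module on identical weights.
import torch

torch.manual_seed(0)
eager = SequentialCNNNet().cuda().eval()
compiled = SequentialCNNNetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # same module names on both sides

x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)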
fangkaimin/pytorch_classification_new
SequentialCNNNet
false
10122
[ "MIT" ]
0
21032e7ab91f0f3106ba07aa97657a023b1cc717
https://github.com/fangkaimin/pytorch_classification_new/tree/21032e7ab91f0f3106ba07aa97657a023b1cc717
BasicModel_ConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ei/ceih7eq6vjz3jn7es5j3rvflanadprgtro72ql24aglbpglc7r22.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 30752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 2 x0 = xindex % 3844 x4 = (xindex // 3844) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ix/cixsl6jlfx5zcu6faevjmhq6vmxcwvrxqxyfofho5mumo3ysjcbs.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 7688 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = (xindex // 31) % 31 x4 = (xindex // 961) x3 = (xindex // 1922) x5 = xindex % 1922 tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x4)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > 
tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + (1952*x3)), tmp6, xmask) tl.store(out_ptr1 + (x5 + (2048*x3)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/pl/cplbiu2hkhuv6snikbok27fpnwr6nbatvkeus64wej5qnihr7sl5.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 13456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 841) % 4 x2 = (xindex // 3364) x4 = xindex % 3364 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + (3392*x2)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bu/cbur2l4ciyqjvq2c6rmsx5hgwjymvzpnwpgnkt3fusyahayhm76b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = 
{}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) % 14 x2 = (xindex // 196) % 4 x3 = (xindex // 784) x4 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (58*x1) + (841*x2) + (3392*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (58*x1) + (841*x2) + (3392*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (29 + (2*x0) + (58*x1) + (841*x2) + (3392*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (30 + (2*x0) + (58*x1) + (841*x2) + (3392*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x4), tmp15, xmask) tl.store(out_ptr1 + (x4), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ev/cevkezfhxrinvhltxym4ino5jizjnctqwjukydgkj5awitzw3z7s.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3h/c3hshbglb6u72lcawx7cirqyk3cun2cecnfoynxuhhwwq6uc6cbs.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_5 = async_compile.triton('triton_per_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1024, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 784 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (10*x0)), tmp11, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8, ), (1, )) assert_size_stride(primals_8, (10, 8), (8, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 30752, grid=grid(30752), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 2, 31, 31), (1952, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 2, 31, 31), (2048, 961, 31, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 7688, grid=grid(7688), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 29, 29), (3364, 841, 29, 1)) buf5 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 13456, grid=grid(13456), stream=stream0) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.int8) buf7 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 3136, grid=grid(3136), stream=stream0) buf8 = empty_strided_cuda((784, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (784, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 6272, grid=grid(6272), stream=stream0) del primals_7 buf10 = empty_strided_cuda((784, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((784, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_5.run(buf10, buf13, 784, 10, grid=grid(784), stream=stream0) del buf10 return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (784, 4), (4, 1), 0), buf9, buf13, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
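The persistent-reduction kernel triton_per_fused__softmax_5 fuses the numerically stable softmax recipe spelled out in its graph fragment: row max (amax), subtract, exp, row sum, divide. The same five ops in plain PyTorch, as a sketch:

# Numerically stable row softmax, matching the fused amax/sub/exp/sum/div.
import torch

logits = torch.randn(784, 10)
m = logits.amax(dim=1, keepdim=True)      # amax
e = (logits - m).exp()                    # sub + exp
out = e / e.sum(dim=1, keepdim=True)      # sum_1 + div
torch.testing.assert_close(out, torch.softmax(logits, dim=1))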
import torch from torch import Tensor import torch.nn as nn from typing import no_type_check class BasicModel_ConvNet(nn.Module): def __init__(self) ->None: super().__init__() self.conv1 = nn.Conv2d(1, 2, 3, 1) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool2d(2) self.conv2 = nn.Conv2d(2, 4, 3, 1) self.relu2 = nn.ReLU() self.pool2 = nn.MaxPool2d(2) self.fc1 = nn.Linear(4, 8) self.relu3 = nn.ReLU() self.fc2 = nn.Linear(8, 10) self.softmax = nn.Softmax(dim=1) self.fc1.weight = nn.Parameter(torch.ones(8, 4)) self.fc2.weight = nn.Parameter(torch.ones(10, 8)) @no_type_check def forward(self, x: 'Tensor') ->Tensor: x = self.relu1(self.conv1(x)) x = self.pool1(x) x = self.relu2(self.conv2(x)) x = self.pool2(x) x = x.view(-1, 4) x = self.relu3(self.fc1(x)) x = self.fc2(x) return self.softmax(x) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
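A minimal usage sketch for the eager model above (assuming BasicModel_ConvNet and get_inputs from this entry are in scope). Because forward flattens with x.view(-1, 4), the pooled (4, 4, 14, 14) feature map is folded into the batch axis and the softmax output is (784, 10).

import torch

model = BasicModel_ConvNet()
x, = get_inputs()                 # (4, 1, 64, 64)
with torch.no_grad():
    probs = model(x)
# conv1: 64 -> 62, pool -> 31, conv2: 31 -> 29, pool -> 14; view(-1, 4) folds
# 4 * 4 * 14 * 14 = 3136 values into 784 rows of 4 features each.
assert probs.shape == (784, 10)
assert torch.allclose(probs.sum(dim=1), torch.ones(784), atol=1e-5)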
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 30752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 2 x0 = xindex % 3844 x4 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 7688 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x4 = xindex // 961 x3 = xindex // 1922 x5 = xindex % 1922 tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + 1952 * x3), tmp6, xmask) tl.store(out_ptr1 + (x5 + 2048 * x3), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 13456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 841 % 4 x2 = xindex // 3364 x4 = xindex % 3364 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 3392 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 % 14 x2 = xindex // 196 % 4 x3 = xindex // 784 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3), 
xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3 ), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x4, tmp15, xmask) tl.store(out_ptr1 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 6272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 784 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (10, 8), (8, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(30752)](buf0, primals_2, buf1, 30752, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 2, 31, 31), (1952, 961, 31, 1), torch .float32) buf3 = empty_strided_cuda((4, 2, 31, 31), (2048, 961, 31, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(7688)](buf1, buf2, buf3, 7688, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 29, 29), (3364, 841, 29, 1)) buf5 = 
empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch .float32) triton_poi_fused_convolution_relu_2[grid(13456)](buf4, primals_5, buf5, 13456, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.int8 ) buf7 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_3[grid(3136)](buf5, buf6, buf7, 3136, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((784, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (784, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(6272)](buf9, primals_7, 6272, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((784, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((784, 10), (10, 1), torch.float32) triton_per_fused__softmax_5[grid(784)](buf10, buf13, 784, 10, XBLOCK=128, num_warps=8, num_stages=1) del buf10 return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (784, 4), (4, 1), 0), buf9, buf13, primals_8, primals_6) class BasicModel_ConvNetNew(nn.Module): def __init__(self) ->None: super().__init__() self.conv1 = nn.Conv2d(1, 2, 3, 1) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool2d(2) self.conv2 = nn.Conv2d(2, 4, 3, 1) self.relu2 = nn.ReLU() self.pool2 = nn.MaxPool2d(2) self.fc1 = nn.Linear(4, 8) self.relu3 = nn.ReLU() self.fc2 = nn.Linear(8, 10) self.softmax = nn.Softmax(dim=1) self.fc1.weight = nn.Parameter(torch.ones(8, 4)) self.fc2.weight = nn.Parameter(torch.ones(10, 8)) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
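A hedged parity sketch between the eager and compiled variants: the generated call() asserts the exact (4, 1, 64, 64) input layout and launches on cuda:0, so this only runs on a CUDA build of PyTorch; both classes from this entry are assumed to be in scope.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    x = torch.rand(4, 1, 64, 64, device='cuda')
    eager = BasicModel_ConvNet().cuda()
    compiled = BasicModel_ConvNetNew().cuda()
    compiled.load_state_dict(eager.state_dict())  # identical parameter names
    with torch.no_grad():
        ref, out = eager(x), compiled(x)
    print(torch.allclose(ref, out, atol=1e-5))    # expected: True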
aravipati12/captum
BasicModel_ConvNet
false
10,123
[ "BSD-3-Clause" ]
0
ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
https://github.com/aravipati12/captum/tree/ef3e81d89c8c4404a49c384cf0727f2e7d393f5f
ContractingBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/bt/cbtjbmr5m5c4mhbqxveibnhvbqjvrlxo2hhzizsqqqjncnkataks.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/mq/cmqbkmbj3td6xyw4rssd26ze72hqlo6utt2dyjtn55mjlhhoz5h2.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 8 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/iv/civewhkiawqehozddx6dairbg4xswxsqz7h76kn6j6hekwge3tp5.py # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool2d => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args 
= (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) % 30 x2 = (xindex // 900) x5 = xindex x4 = (xindex // 7200) x6 = xindex % 7200 tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5), tmp6, xmask) tl.store(out_ptr1 + (x6 + (7296*x4)), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 62, 62), (30752, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 123008, grid=grid(123008), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 60, 60), (28800, 3600, 60, 1)) buf3 = empty_strided_cuda((4, 8, 60, 60), (28928, 3616, 60, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_5, buf3, 115200, grid=grid(115200), stream=stream0) del buf2 del primals_5 buf4 = empty_strided_cuda((4, 8, 30, 30), (7200, 900, 30, 1), torch.float32) buf5 = empty_strided_cuda((4, 8, 30, 30), (7296, 900, 30, 1), torch.int8) # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 28800, grid=grid(28800), stream=stream0) return (buf4, primals_1, primals_3, primals_4, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ContractingBlock(nn.Module): def __init__(self, input_channel): super(ContractingBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=input_channel, out_channels= input_channel * 2, kernel_size=(3, 3)) self.conv2 = nn.Conv2d(input_channel * 2, input_channel * 2, kernel_size=(3, 3)) self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) self.activation = nn.ReLU() def forward(self, x): x = self.activation(self.conv1(x)) x = self.activation(self.conv2(x)) return self.maxpool(x) def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'input_channel': 4}]
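A shape walk-through sketch for the block above (assuming ContractingBlock and get_inputs from this entry are in scope): each valid 3x3 conv trims 2 pixels per spatial dim and the 2x2 stride-2 max-pool halves them.

import torch

block = ContractingBlock(input_channel=4)
x, = get_inputs()                 # (4, 4, 64, 64)
with torch.no_grad():
    y = block(x)
# conv1 (4 -> 8 ch): 64 -> 62; conv2 (8 -> 8 ch): 62 -> 60; max-pool: 60 -> 30
assert y.shape == (4, 8, 30, 30)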
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 8 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x5 = xindex x4 = xindex // 7200 x6 = xindex % 7200 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 7296 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 62, 62), (30752, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(123008)](buf1, primals_2, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del 
primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 60, 60), (28800, 3600, 60, 1)) buf3 = empty_strided_cuda((4, 8, 60, 60), (28928, 3616, 60, 1), torch.float32) triton_poi_fused_convolution_relu_1[grid(115200)](buf2, primals_5, buf3, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del buf2 del primals_5 buf4 = empty_strided_cuda((4, 8, 30, 30), (7200, 900, 30, 1), torch .float32) buf5 = empty_strided_cuda((4, 8, 30, 30), (7296, 900, 30, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_2[grid(28800)](buf3, buf4, buf5, 28800, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_3, primals_4, buf1, buf3, buf5 class ContractingBlockNew(nn.Module): def __init__(self, input_channel): super(ContractingBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=input_channel, out_channels= input_channel * 2, kernel_size=(3, 3)) self.conv2 = nn.Conv2d(input_channel * 2, input_channel * 2, kernel_size=(3, 3)) self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) self.activation = nn.ReLU() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
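One detail worth noting in the compiled wrapper above: several intermediates are allocated with padded strides, e.g. (28928, 3616, 60, 1) for a logical (4, 8, 60, 60) tensor instead of the contiguous (28800, 3600, 60, 1). The extra 16 floats (64 bytes) per channel plane look like an Inductor alignment choice; that reading is inferred, not stated in this file. The sketch below just reproduces the layout on any CUDA device.

import torch

# Channel stride 3616 exceeds 60 * 60 = 3600, so the buffer is deliberately
# non-contiguous even though the logical shape is (4, 8, 60, 60).
t = torch.empty_strided((4, 8, 60, 60), (28928, 3616, 60, 1),
                        dtype=torch.float32, device='cuda')
print(t.is_contiguous())  # False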
furkannturkmen/pytorch-CNN-architecture
ContractingBlock
false
10,124
[ "MIT" ]
0
6a864811f51409c1526224c288fe608010e0c888
https://github.com/furkannturkmen/pytorch-CNN-architecture/tree/6a864811f51409c1526224c288fe608010e0c888
BertPooler
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7o/c7otc5ij6whexgxcr56vlxp2l7hzg3oc4onljp557uc6wncu5gvg.py # Topologically Sorted Source Nodes: [first_token_tensor], Original ATen: [aten.sum] # Source node to ATen node mapping: # first_token_tensor => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [1]), kwargs = {}) triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 
+ x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py # Topologically Sorted Source Nodes: [pooled_output_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # pooled_output_1 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [first_token_tensor], Original ATen: [aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_sum_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [pooled_output_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 
return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states.sum(dim=1) pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4)}]
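_mock_config is a paritybench test helper; any object exposing a hidden_size attribute works, so the sketch below substitutes a SimpleNamespace (a hypothetical stand-in). Note that despite the name first_token_tensor, this variant sums over dim=1 rather than slicing out token 0.

from types import SimpleNamespace
import torch

pooler = BertPooler(SimpleNamespace(hidden_size=4))
hidden = torch.rand(4, 4, 4, 4)
out = pooler(hidden)              # sum over dim=1, Linear(4, 4), tanh
assert out.shape == (4, 4, 4)
assert out.abs().max() <= 1.0     # tanh-bounded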
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2 class BertPoolerNew(nn.Module): def __init__(self, config): super(BertPoolerNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
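A small sketch of what triton_poi_fused_sum_0 computes: for the (4, 4, 4, 4) case it unrolls the dim=1 reduction into four loads spaced 16 elements apart (offsets 0, 16, 32, 48), which is the manual sum below.

import torch

h = torch.rand(4, 4, 4, 4)
flat = h.reshape(4, 64)           # 64 floats per batch item, 16 per dim-1 slice
manual = flat[:, 0:16] + flat[:, 16:32] + flat[:, 32:48] + flat[:, 48:64]
assert torch.allclose(manual.reshape(4, 4, 4), h.sum(dim=1))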
etherlabsio/ai-engine
BertPooler
false
10,125
[ "MIT" ]
0
e73a4419a34db42a410e2a7e7629eb946b86f2c2
https://github.com/etherlabsio/ai-engine/tree/e73a4419a34db42a410e2a7e7629eb946b86f2c2
ConvertTCHWtoCTHW
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/64/c64ahxnpt5ixqrlolbug3qf6y4u2zqmcjekif2yu4ba4hcze2fom.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class ConvertTCHWtoCTHW(torch.nn.Module): """ Convert a torch.FloatTensor of shape (TIME x CHANNELS x HEIGHT x WIDTH) to a torch.FloatTensor of shape (CHANNELS x TIME x HEIGHT x WIDTH). """ def forward(self, tensor): return tensor.permute(1, 0, 2, 3).contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
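A quick correctness sketch for the module above: it is a pure permutation plus a copy into contiguous memory, so element (t, c, h, w) of the input lands at (c, t, h, w) of the output.

import torch

m = ConvertTCHWtoCTHW()
x = torch.rand(4, 3, 8, 8)            # (T, C, H, W)
y = m(x)
assert y.shape == (3, 4, 8, 8) and y.is_contiguous()
assert torch.equal(y[1, 2], x[2, 1])  # channel 1 at time step 2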
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ConvertTCHWtoCTHWNew(torch.nn.Module): """ Convert a torch.FloatTensor of shape (TIME x CHANNELS x HEIGHT x WIDTH) to a torch.FloatTensor of shape (CHANNELS x TIME x HEIGHT x WIDTH). """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
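The load index in triton_poi_fused_clone_0 (x0 + 16 * x2 + 64 * x1) is the gather form of that permutation on a (T=4, C=4, H*W=16) view; the sketch below replays the same index decomposition on the CPU.

import torch

x = torch.arange(256).reshape(4, 4, 16)           # (T, C, HW), contiguous
flat, out = x.reshape(-1), torch.empty(256, dtype=x.dtype)
for i in range(256):                              # i plays the role of xindex
    x0, x1, x2 = i % 16, (i // 16) % 4, i // 64   # same decomposition as the kernel
    out[i] = flat[x0 + 16 * x2 + 64 * x1]
assert torch.equal(out.reshape(4, 4, 16), x.permute(1, 0, 2))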
XianyuanLiu/Transfer-Learning-Library
ConvertTCHWtoCTHW
false
10,126
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
TLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cx/ccxf66rqhne57tzktyn4px2euzht4rw75vuy3fmua4ausvcns3e5.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.maximum, aten.eq, aten.gt] # Source node to ATen node mapping: # max_1 => maximum # Graph fragment: # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_2, %primals_1), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%primals_2, %primals_1), kwargs = {}) triton_poi_fused_eq_gt_maximum_0 = async_compile.triton('triton_poi_fused_eq_gt_maximum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_gt_maximum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_gt_maximum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp0 == tmp1 tmp4 = tmp0 > tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr1 + (x3), tmp3, xmask) tl.store(out_ptr2 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.maximum, aten.eq, aten.gt] stream0 = get_raw_stream(0) triton_poi_fused_eq_gt_maximum_0.run(primals_2, primals_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data.distributed class TLU(nn.Module): """ Thresholded Linear Unit """ def __init__(self, num_features): super().__init__() self.num_features = num_features self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1)) def forward(self, x): return torch.max(x, self.tau) def extra_repr(self): return 'num_features={}'.format(self.num_features) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
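A semantics sketch for the module above: torch.max(x, self.tau) broadcasts the (1, C, 1, 1) threshold over (N, C, H, W), so each channel is clamped from below by its own tau; with the zero initialisation it coincides with ReLU.

import torch

tlu = TLU(num_features=4)
x = torch.randn(2, 4, 5, 5)
assert torch.equal(tlu(x), torch.relu(x))        # tau == 0 at init
with torch.no_grad():
    tlu.tau.fill_(0.5)
assert torch.equal(tlu(x), x.clamp(min=0.5))     # per-channel floor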
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_gt_maximum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp0 == tmp1 tmp4 = tmp0 > tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr1 + x3, tmp3, xmask) tl.store(out_ptr2 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_gt_maximum_0[grid(256)](primals_2, primals_1, buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, buf1, buf2 class TLUNew(nn.Module): """ Thresholded Linear Unit """ def __init__(self, num_features): super().__init__() self.num_features = num_features self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1)) def extra_repr(self): return 'num_features={}'.format(self.num_features) def forward(self, input_0): primals_1 = self.tau primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
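The fused kernel above stores not just the maximum but also (x == tau) and (x > tau) masks (buf1, buf2), which are returned for the backward pass: aten.maximum routes the incoming gradient with exactly those masks, splitting it at ties. A small autograd sketch (expected behaviour on current PyTorch; the 0.5 tie split is inferred from the saved masks):

import torch

x = torch.tensor([0.2, -0.3, 0.0], requires_grad=True)
tau = torch.zeros(3, requires_grad=True)
torch.maximum(x, tau).sum().backward()
print(x.grad)    # expected tensor([1.0, 0.0, 0.5]) -- ties split the gradient
print(tau.grad)  # expected tensor([0.0, 1.0, 0.5])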
derwind/mxfont
TLU
false
10,127
[ "MIT" ]
0
0b6d4554a1e2208906230d3121d792d450ed28dd
https://github.com/derwind/mxfont/tree/0b6d4554a1e2208906230d3121d792d450ed28dd
CNNNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7e/c7emkemmywdizbc2dk22jgylyayrytgas3pkynkv2rcugfx3fbgp.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/ko/ckolze3b2rkhboj4tifvnwzo2lkguxrisl6edphahj3tsblduqdj.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 1024], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (1024*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (3072*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/52/c5276jy76qbq3iyizea75fwcetvvtf5twplgnzcooj45tx54swnf.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/up/cuppmbhehjhgagl7rthdsjbpgj2vlygxvpseble37orq4f3jjifs.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 200704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gs/cgszexmp33fmtqpfvwisguxkofvtt5aagh7glp6ns3etzhldua2y.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) % 14 x2 = (xindex // 896) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (3584*x2)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (3584*x2)), xmask) tmp3 = tl.load(in_ptr0 + (1792 + x0 + (128*x1) + (3584*x2)), xmask) tmp5 = tl.load(in_ptr0 + (1856 + x0 + (128*x1) + (3584*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c5/cc5blmfahkzenag6gn4lp747l72lk7l7cktgzanqbberxnmov4gz.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => 
relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kl/cklb2l37lrkhdd4ime2yzpxsvp5xrsnnhzfbnyhkn3kxgs5tlfpp.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 
128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 100 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 5 y1 = (yindex // 5) y5 = yindex y4 = (yindex // 25) y6 = yindex % 25 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1280 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1408 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (25*x2) + (3200*y4)), tmp16, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7q/c7qqwqdzvxfbhba3xq4ekccqsza54dcn4ct4vxtz4wtiuqqqov23.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5r/c5rew3z43xyszol4yt6syaqyxi25pi7zva5brz5kbyzrds7qts2j.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset 
= tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (1024, 3200), (3200, 1)) assert_size_stride(primals_7, (1024, ), (1, )) assert_size_stride(primals_8, (84, 1024), (1024, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (50, 84), (84, 1)) assert_size_stride(primals_11, (50, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 25, grid=grid(192, 25), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 32, 32), (3072, 1, 96, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 1024, grid=grid(12, 1024), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 8192, 25, grid=grid(8192, 25), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 28, 28), (50176, 1, 1792, 64)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf4, primals_2, 200704, grid=grid(200704), stream=stream0) del primals_2 buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32) buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_4.run(buf4, buf5, buf6, 50176, grid=grid(50176), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf8, primals_5, 51200, grid=grid(51200), stream=stream0) del primals_5 buf9 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8) buf10 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch.float32) # Topologically Sorted Source 
Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_6.run(buf8, buf9, buf10, 100, 128, grid=grid(100, 128), stream=stream0) buf11 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), out=buf11) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_7.run(buf12, primals_7, 4096, grid=grid(4096), stream=stream0) del primals_7 buf13 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (1024, 84), (1, 1024), 0), out=buf13) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_8.run(buf14, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf15 = empty_strided_cuda((4, 50), (50, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf14, reinterpret_tensor(primals_10, (84, 50), (1, 84), 0), alpha=1, beta=1, out=buf15) del primals_11 return (buf15, buf0, buf1, buf2, buf4, buf5, buf6, buf8, buf9, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), buf12, buf14, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1024, 3200), (3200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((50, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class CNNNet(nn.Module):

    def __init__(self):
        super(CNNNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(64, 128, 5)
        self.fc1 = nn.Linear(128 * 5 * 5, 1024)
        self.fc2 = nn.Linear(1024, 84)
        self.fc3 = nn.Linear(84, 50)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 128 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {}]
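The fused kernels above hard-code the sizes this module produces for a 4x3x32x32 input: conv1 (5x5) maps 32->28, pooling maps 28->14, conv2 maps 14->10, pooling maps 10->5, and flattening gives 128 * 5 * 5 = 3200 features. The following shape check is an illustrative sketch, not part of the dataset entry; the names `model` and `x` are arbitrary.

# Hedged sanity check of the shape arithmetic assumed by the fixed
# xnumel constants in the Triton kernels above.
import torch

model = CNNNet()
x = torch.rand(4, 3, 32, 32)
assert model(x).shape == (4, 50)
# Element counts match the generated kernels: 4*64*28*28 = 200704
# (conv1+relu), 4*64*14*14 = 50176 (pool), 4*128*10*10 = 51200 (conv2+relu).
assert 4 * 64 * 28 * 28 == 200704
assert 4 * 128 * 10 * 10 == 51200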
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 3072 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 14 x2 = xindex // 896 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3584 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3584 * x2), xmask) tmp3 = tl.load(in_ptr0 + (1792 + x0 + 128 * x1 + 3584 * x2), xmask) tmp5 = tl.load(in_ptr0 + (1856 + x0 + 128 * x1 + 3584 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, 
tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 100 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 5 y1 = yindex // 5 y5 = yindex y4 = yindex // 25 y6 = yindex % 25 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1280 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1408 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 25 * x2 + 3200 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128,), 
(1,)) assert_size_stride(primals_6, (1024, 3200), (3200, 1)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (84, 1024), (1024, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (50, 84), (84, 1)) assert_size_stride(primals_11, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 25)](primals_1, buf0, 192, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 32, 32), (3072, 1, 96, 3), torch. float32) triton_poi_fused_1[grid(12, 1024)](primals_3, buf1, 12, 1024, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_2[grid(8192, 25)](primals_4, buf2, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 28, 28), (50176, 1, 1792, 64)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_3[grid(200704)](buf4, primals_2, 200704, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32) buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_4[grid(50176)](buf4, buf5, buf6, 50176, XBLOCK=512, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_5[grid(51200)](buf8, primals_5, 51200, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8) buf10 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_6[grid(100, 128)](buf8, buf9, buf10, 100, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0 ), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_7[grid(4096)](buf12, primals_7, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf13 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (1024, 84), (1, 1024), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_8[grid(336)](buf14, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf15 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.addmm(primals_11, buf14, reinterpret_tensor( primals_10, (84, 50), (1, 84), 0), alpha=1, beta=1, out=buf15) del primals_11 return (buf15, buf0, buf1, buf2, buf4, buf5, buf6, buf8, buf9, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), buf12, buf14, primals_10, primals_8, primals_6) class CNNNetNew(nn.Module): def __init__(self): super(CNNNetNew, self).__init__() self.conv1 = nn.Conv2d(3, 64, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(64, 128, 5) self.fc1 = nn.Linear(128 * 5 * 5, 1024) self.fc2 = nn.Linear(1024, 84) self.fc3 = nn.Linear(84, 50) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
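Since CNNNetNew keeps the same attribute names as CNNNet, the compiled wrapper can be validated against the eager module by sharing a state dict. This is a hedged usage sketch assuming a CUDA device (the generated `call` is CUDA-only); it is not part of the original repository.

# Compare eager and Inductor-compiled forward passes on shared weights.
import torch

eager = CNNNet().cuda()
compiled = CNNNetNew().cuda()
compiled.load_state_dict(eager.state_dict())  # parameter names are identical
x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)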
fangkaimin/pytorch_classification_new
CNNNet
false
10,128
[ "MIT" ]
0
21032e7ab91f0f3106ba07aa97657a023b1cc717
https://github.com/fangkaimin/pytorch_classification_new/tree/21032e7ab91f0f3106ba07aa97657a023b1cc717
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [CEloss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # CEloss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 
= tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ld/cldleqzwoafze4tyha42odz2oe2es3iujqdyatbjtpmb7esescjf.py # Topologically Sorted Source Nodes: [CEloss, neg, pt, sub, pow_1, mul, Floss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean] # Source node to ATen node mapping: # CEloss => exp, log, mul, neg, sub_1, sum_1, sum_2 # Floss => mul_2 # mean => mean # mul => mul_1 # neg => neg_1 # pow_1 => pow_1 # pt => exp_1 # sub => sub_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %neg), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_2,), kwargs = {}) triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = -tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = 1.0 tmp31 = tmp30 - tmp29 tmp32 = 0.5 tmp33 = tmp31 * tmp32 tmp34 = tmp33 * tmp27 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.sum(tmp35, 1)[:, None] tmp38 = 64.0 tmp39 = tmp37 / tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [CEloss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [CEloss, neg, pt, sub, pow_1, mul, Floss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean] triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), 
device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class FocalLoss(nn.Module):

    def __init__(self, alpha=0.5, gamma=1.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, inputs, targets, **kwargs):
        CEloss = nn.CrossEntropyLoss(reduction='none')(inputs, targets)
        pt = torch.exp(-CEloss)
        Floss = self.alpha * (1 - pt) ** self.gamma * CEloss
        return Floss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
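With the default alpha=0.5 and gamma=1.0 used when this module was traced, the loss reduces per position to 0.5 * (1 - exp(-CE)) * CE, which is exactly the 0.5 multiplier and the pow(..., 1.0) visible in the fused kernel's graph fragment above. A small hedged check of that closed form (input values are arbitrary soft targets, matching get_inputs()):

# Verify FocalLoss against the expanded formula alpha*(1-exp(-CE))**gamma*CE.
import torch
from torch import nn

inputs, targets = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
ce = nn.CrossEntropyLoss(reduction='none')(inputs, targets)  # shape (4, 4, 4)
manual = (0.5 * (1 - torch.exp(-ce)) ** 1.0 * ce).mean()
assert torch.allclose(FocalLoss()(inputs, targets), manual)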
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = -tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = 1.0 tmp31 = tmp30 - tmp29 tmp32 = 0.5 tmp33 = tmp31 * tmp32 tmp34 = tmp33 * tmp27 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.sum(tmp35, 1)[:, None] tmp38 = 64.0 tmp39 = tmp37 / tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)]( buf3, buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class FocalLossNew(nn.Module): def 
__init__(self, alpha=0.5, gamma=1.0): super().__init__() self.alpha = alpha self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
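A hedged CUDA sketch (not from the source repo) for checking that the compiled wrapper matches the eager FocalLoss; the default alpha=0.5 and gamma=1.0 are the constants baked into the fused kernel, and a small tolerance allows for floating-point reassociation in the fused reduction.

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(FocalLoss()(a, b), FocalLossNew()(a, b), atol=1e-6)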
gurucharanmk/Fruits-360_Image_Classification
FocalLoss
false
10,129
[ "MIT" ]
0
9d26bba972ed3eca762ff225b33bd70e82edc7f0
https://github.com/gurucharanmk/Fruits-360_Image_Classification/tree/9d26bba972ed3eca762ff225b33bd70e82edc7f0
AdaptiveFeatureNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xu/cxuw4zngts3btgwtvpkzjuhzg5qrx7eatj7fzhrybxfvwesbsblb.py # Topologically Sorted Source Nodes: [norm_1, norm, radius_1, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.add, aten.sub, aten.pow, aten.mean] # Source node to ATen node mapping: # loss => mean # norm => pow_1, pow_2, sum_1 # norm_1 => pow_3, pow_4, sum_2 # pow_1 => pow_5 # radius_1 => add # sub => sub # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_4, %add), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {}) triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 4.0 tmp13 = tmp11 + tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [norm_1, norm, radius_1, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.add, aten.sub, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class AdaptiveFeatureNorm(nn.Module): """ The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_ Instead of using restrictive scalar R to match the corresponding feature norm, Stepwise Adaptive Feature Norm is used in order to learn task-specific features with large norms in a progressive manner. We denote parameters of backbone :math:`G` as :math:`\\theta_g`, parameters of bottleneck :math:`F_f` as :math:`\\theta_f` , parameters of classifier head :math:`F_y` as :math:`\\theta_y`, and features extracted from sample :math:`x_i` as :math:`h(x_i;\\theta)`. Full loss is calculated as follows .. math:: L(\\theta_g,\\theta_f,\\theta_y)=\\frac{1}{n_s}\\sum_{(x_i,y_i)\\in D_s}L_y(x_i,y_i)+\\frac{\\lambda}{n_s+n_t} \\sum_{x_i\\in D_s\\cup D_t}L_d(h(x_i;\\theta_0)+\\Delta_r,h(x_i;\\theta))\\\\ where :math:`L_y` denotes classification loss, :math:`L_d` denotes norm loss, :math:`\\theta_0` and :math:`\\theta` represent the updated and updating model parameters in the last and current iterations respectively. Args: delta (float): positive residual scalar to control the feature norm enlargement. Inputs: - f (tensor): feature representations on source or target domain. Shape: - f: :math:`(N, F)` where F means the dimension of input features. - Outputs: scalar. Examples:: >>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1) >>> f_s = torch.randn(32, 1000) >>> f_t = torch.randn(32, 1000) >>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t) """ def __init__(self, delta): super(AdaptiveFeatureNorm, self).__init__() self.delta = delta def forward(self, f: 'torch.Tensor') ->torch.Tensor: radius = f.norm(p=2, dim=1).detach() assert radius.requires_grad is False radius = radius + self.delta loss = ((f.norm(p=2, dim=1) - radius) ** 2).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'delta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 4.0 tmp13 = tmp11 + tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class AdaptiveFeatureNormNew(nn.Module): """ The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_ Instead of using restrictive scalar R to match the corresponding feature norm, Stepwise Adaptive Feature Norm is used in order to learn task-specific features with large norms in a progressive manner. We denote parameters of backbone :math:`G` as :math:`\\theta_g`, parameters of bottleneck :math:`F_f` as :math:`\\theta_f` , parameters of classifier head :math:`F_y` as :math:`\\theta_y`, and features extracted from sample :math:`x_i` as :math:`h(x_i;\\theta)`. Full loss is calculated as follows .. math:: L(\\theta_g,\\theta_f,\\theta_y)=\\frac{1}{n_s}\\sum_{(x_i,y_i)\\in D_s}L_y(x_i,y_i)+\\frac{\\lambda}{n_s+n_t} \\sum_{x_i\\in D_s\\cup D_t}L_d(h(x_i;\\theta_0)+\\Delta_r,h(x_i;\\theta))\\\\ where :math:`L_y` denotes classification loss, :math:`L_d` denotes norm loss, :math:`\\theta_0` and :math:`\\theta` represent the updated and updating model parameters in the last and current iterations respectively. Args: delta (float): positive residual scalar to control the feature norm enlargement. Inputs: - f (tensor): feature representations on source or target domain. Shape: - f: :math:`(N, F)` where F means the dimension of input features. - Outputs: scalar. Examples:: >>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1) >>> f_s = torch.randn(32, 1000) >>> f_t = torch.randn(32, 1000) >>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t) """ def __init__(self, delta): super(AdaptiveFeatureNormNew, self).__init__() self.delta = delta def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
AdaptiveFeatureNorm
false
10130
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
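A note on the AdaptiveFeatureNorm entry above: the single persistent-reduction kernel folds the whole loss into one pass over the 4x4x4x4 input -- the squared L2 norm over dim=1 (tmp1..tmp10), the sqrt (tmp11), the detached radius plus delta=4 (tmp13), the squared residual (tmp15), and the mean over 64 positions (tmp20). Below is a minimal eager sketch of the same computation; the function name adaptive_feature_norm and the delta**2 check are ours, not part of the record:

import torch

def adaptive_feature_norm(f: torch.Tensor, delta: float = 4.0) -> torch.Tensor:
    # Stop-gradient target: the current norm enlarged by delta.
    radius = f.norm(p=2, dim=1).detach() + delta
    return ((f.norm(p=2, dim=1) - radius) ** 2).mean()

loss = adaptive_feature_norm(torch.rand(4, 4, 4, 4))
# Because radius reuses the detached norm of the same tensor, the forward
# value is always delta ** 2 (= 16.0 here, matching tmp14 = -4 in the
# kernel); the loss matters only through its gradient, which pushes the
# feature norm upward.
assert torch.allclose(loss, torch.tensor(16.0))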
ConvertTHWCtoTCHW
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/kq/ckqzjwsdojunslaxexvivgmdizmhm6czh2xpzemjckiggsrasgr3.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 
= xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tl.store(out_ptr0 + (x2 + (16*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class ConvertTHWCtoTCHW(torch.nn.Module): """ Convert a torch.FloatTensor of shape (TIME x HEIGHT x WIDTH x CHANNEL) to a torch.FloatTensor of shape (TIME x CHANNELS x HEIGHT x WIDTH). """ def forward(self, tensor): return tensor.permute(0, 3, 1, 2).contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return buf0, class ConvertTHWCtoTCHWNew(torch.nn.Module): """ Convert a torch.FloatTensor of shape (TIME x HEIGHT x WIDTH x CHANNEL) to a torch.FloatTensor of shape (TIME x CHANNELS x HEIGHT x WIDTH). """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
ConvertTHWCtoTCHW
false
10131
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
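The ConvertTHWCtoTCHW entry reduces to a single clone kernel because permute() in eager PyTorch only swaps strides; it is the .contiguous() call that launches the copy, which Inductor emits as the 16x16-tiled triton_poi_fused_clone_0 above. A short eager sketch (toy shape taken from get_inputs):

import torch

x = torch.rand(4, 4, 4, 4)       # (T, H, W, C) toy input
y = x.permute(0, 3, 1, 2)        # (T, C, H, W) view: same storage, new strides
assert not y.is_contiguous()
z = y.contiguous()               # materializes the TCHW layout (the clone kernel)
assert z.is_contiguous() and torch.equal(z, y)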
MinusRbfHSIC
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ap/capgcrfp5emechuzxbqib3bmwq6oopknqyu7tj7yeqna5gisxy3r.py # Topologically Sorted Source Nodes: [mul, add, X_L2, mul_1, kernel_XX, diag_2, tK, sum_1], Original ATen: [aten.mul, aten.add, aten.exp, aten.diagonal_copy, aten.sub, aten.sum] # Source node to ATen node mapping: # X_L2 => add_1 # add => add # diag_2 => diagonal_copy_2 # kernel_XX => exp # mul => mul # mul_1 => mul_1 # sum_1 => sum_2 # tK => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, -2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, -0.03125), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {}) # %diagonal_copy_2 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%exp,), kwargs = {}) # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, %diagonal_copy_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub,), kwargs = {}) triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0 = async_compile.triton('triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = (rindex // 4) r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r2), None) tmp3 = tl.load(in_ptr0 + (5*r1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last') tmp1 = -2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = -0.03125 tmp8 = tmp6 * tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = tmp5 * tmp1 tmp11 = tmp10 + tmp5 tmp12 = tmp11 + tmp5 tmp13 = tmp12 * tmp7 tmp14 = tl_math.exp(tmp13) tmp15 = tmp9 - tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp15, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/w3/cw34kl3phuif6ry2aneibc6ftjlhxybmbp6a67hhchth4m2lg3un.py # Topologically Sorted Source Nodes: [trace, mul_4, truediv, truediv_1, add_4, sum_3, sum_4, dot, mul_5, truediv_2, hsic, truediv_3, neg], Original ATen: [aten.trace, aten.mul, aten.div, aten.add, aten.sum, aten.dot, aten.sub, aten.neg] # Source node to ATen node mapping: # add_4 => add_4 # dot => mul_5, sum_6 # hsic => sub_2 # mul_4 => mul_4 # mul_5 => mul_6 # neg => neg # sum_3 => sum_4 # sum_4 => sum_5 # trace => diagonal_copy_4, sum_1 # truediv => div # truediv_1 => div_1 # truediv_2 => div_2 # truediv_3 => div_3 # Graph fragment: # %diagonal_copy_4 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%mm_2,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%diagonal_copy_4,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, 3), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %div_1), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [0]), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [0]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, %sum_5), kwargs = {}) # %sum_6 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.default](args = (%mul_5,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 2), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_6, 2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %div_2), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 4), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div_3,), kwargs = {}) triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1 = async_compile.triton('triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (r0), None) tmp5 = tl.load(in_ptr1 + (4 + r0), None) tmp7 = tl.load(in_ptr1 + (8 + r0), None) tmp9 = tl.load(in_ptr1 + (12 + r0), None) tmp11 = tl.load(in_ptr2 + (r0), None) tmp12 = tl.load(in_ptr2 + (4 + r0), None) tmp14 = tl.load(in_ptr2 + (8 + r0), None) tmp16 = tl.load(in_ptr2 + (12 + r0), None) tmp22 = tl.load(in_ptr3 + (0)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1]) tmp24 = tl.load(in_ptr4 + (0)) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp10 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) 
tmp21 = tl.sum(tmp19, 1)[:, None] tmp26 = tmp23 * tmp25 tmp27 = 0.3333333333333333 tmp28 = tmp26 * tmp27 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp3 + tmp30 tmp32 = 2.0 tmp33 = tmp21 * tmp32 tmp34 = tmp33 * tmp29 tmp35 = tmp31 - tmp34 tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = -tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [XX], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(arg0_1, (4, 64), (64, 1), 0), reinterpret_tensor(arg0_1, (64, 4), (1, 64), 0), out=buf0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [mul, add, X_L2, mul_1, kernel_XX, diag_2, tK, sum_1], Original ATen: [aten.mul, aten.add, aten.exp, aten.diagonal_copy, aten.sub, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0.run(buf0, buf1, buf6, 1, 16, grid=grid(1), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [XX_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(arg1_1, (4, 64), (64, 1), 0), reinterpret_tensor(arg1_1, (64, 4), (1, 64), 0), out=buf2) del arg1_1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [mul_2, add_2, X_L2_1, mul_3, kernel_XX_1, diag_3, tL, sum_2], Original ATen: [aten.mul, aten.add, aten.exp, aten.diagonal_copy, aten.sub, aten.sum] triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0.run(buf2, buf3, buf7, 1, 16, grid=grid(1), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm] extern_kernels.mm(buf1, buf3, out=buf4) buf5 = empty_strided_cuda((), (), torch.float32) buf9 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [trace, mul_4, truediv, truediv_1, add_4, sum_3, sum_4, dot, mul_5, truediv_2, hsic, truediv_3, neg], Original ATen: [aten.trace, aten.mul, aten.div, aten.add, aten.sum, aten.dot, aten.sub, aten.neg] triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1.run(buf9, buf4, buf1, buf3, buf6, buf7, 1, 4, grid=grid(1), stream=stream0) del buf1 del buf3 del buf4 del buf6 del buf7 return (buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data.distributed class HSIC(nn.Module): """Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC) .. math:: HSIC (X, Y) := || C_{x, y} ||^2_{HS}, where HSIC (X, Y) = 0 iff X and Y are independent. Empirically, we use the finite sample estimator of HSIC (with m observations) given by either (1) biased estimator (HSIC_0) Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005. :math:`(m - 1)^{-2} tr(KHLH)`. where K_{ij} = kernel_x (x_i, x_j), L_{ij} = kernel_y (y_i, y_j), H = I - m^{-1} 1 1^\\top (Hence, K, L, H are m by m matrices). (2) unbiased estimator (HSIC_1) Song, Le, et al. "Feature selection via dependence maximization." 2012. :math:`\\frac{1}{m (m - 3)} \\bigg[ tr(\\tilde K \\tilde L) + \\frac{1^\\top \\tilde K 1 1^\\top \\tilde L 1}{(m-1)(m-2)} - \\frac{2}{m-2} 1^\\top \\tilde K \\tilde L 1 \\bigg]`. where \\tilde K and \\tilde L are obtained from K and L by setting the diagonal entries \\tilde K_{ii} and \\tilde L_{ii} to zero. Parameters ---------- sigma_x : float the kernel size of the kernel function for X. sigma_y : float the kernel size of the kernel function for Y. algorithm: str ('unbiased' / 'biased') the algorithm for the finite sample estimator. 'unbiased' is used for our paper. reduction: not used (for compatibility with other losses). """ def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased', reduction=None): super(HSIC, self).__init__() if sigma_y is None: sigma_y = sigma_x self.sigma_x = sigma_x self.sigma_y = sigma_y if algorithm == 'biased': self.estimator = self.biased_estimator elif algorithm == 'unbiased': self.estimator = self.unbiased_estimator else: raise ValueError('invalid estimator: {}'.format(algorithm)) def _kernel_x(self, X): raise NotImplementedError def _kernel_y(self, Y): raise NotImplementedError def biased_estimator(self, input1, input2): """Biased estimator of Hilbert-Schmidt Independence Criterion Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005. """ K = self._kernel_x(input1) L = self._kernel_y(input2) KH = K - K.mean(0, keepdim=True) LH = L - L.mean(0, keepdim=True) N = len(input1) return torch.trace(KH @ LH / (N - 1) ** 2) def unbiased_estimator(self, input1, input2): """Unbiased estimator of Hilbert-Schmidt Independence Criterion Song, Le, et al. "Feature selection via dependence maximization." 2012. """ kernel_XX = self._kernel_x(input1) kernel_YY = self._kernel_y(input2) tK = kernel_XX - torch.diag(kernel_XX) tL = kernel_YY - torch.diag(kernel_YY) N = len(input1) hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2) return hsic / (N * (N - 3)) def forward(self, input1, input2, **kwargs): return self.estimator(input1, input2) class RbfHSIC(HSIC): """Radial Basis Function (RBF) kernel HSIC implementation. """ def _kernel(self, X, sigma): X = X.view(len(X), -1) XX = X @ X.t() X_sqnorms = torch.diag(XX) X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0) gamma = 1 / (2 * sigma ** 2) kernel_XX = torch.exp(-gamma * X_L2) return kernel_XX def _kernel_x(self, X): return self._kernel(X, self.sigma_x) def _kernel_y(self, Y): return self._kernel(Y, self.sigma_y) class MinusRbfHSIC(RbfHSIC): """``Minus'' RbfHSIC for the ``max'' optimization. """ def forward(self, input1, input2, **kwargs): return -self.estimator(input1, input2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sigma_x': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp1 = -2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = -0.03125 tmp8 = tmp6 * tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = tmp5 * tmp1 tmp11 = tmp10 + tmp5 tmp12 = tmp11 + tmp5 tmp13 = tmp12 * tmp7 tmp14 = tl_math.exp(tmp13) tmp15 = tmp9 - tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp15, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) @triton.jit def triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl .constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + r0, None) tmp5 = tl.load(in_ptr1 + (4 + r0), None) tmp7 = tl.load(in_ptr1 + (8 + r0), None) tmp9 = tl.load(in_ptr1 + (12 + r0), None) tmp11 = tl.load(in_ptr2 + r0, None) tmp12 = tl.load(in_ptr2 + (4 + r0), None) tmp14 = tl.load(in_ptr2 + (8 + r0), None) tmp16 = tl.load(in_ptr2 + (12 + r0), None) tmp22 = tl.load(in_ptr3 + 0) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1]) tmp24 = tl.load(in_ptr4 + 0) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp10 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp26 = tmp23 * tmp25 tmp27 = 0.3333333333333333 tmp28 = tmp26 * tmp27 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp3 + tmp30 tmp32 = 2.0 tmp33 = tmp21 * tmp32 tmp34 = tmp33 * tmp29 tmp35 = tmp31 - tmp34 tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = -tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(arg0_1, (4, 64), (64, 1), 
0), reinterpret_tensor(arg0_1, (64, 4), (1, 64), 0), out=buf0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0[grid(1)](buf0, buf1, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(arg1_1, (4, 64), (64, 1), 0), reinterpret_tensor(arg1_1, (64, 4), (1, 64), 0), out=buf2) del arg1_1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0[grid(1)](buf2, buf3, buf7, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf4 = buf2 del buf2 extern_kernels.mm(buf1, buf3, out=buf4) buf5 = empty_strided_cuda((), (), torch.float32) buf9 = buf5 del buf5 triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1[grid(1)](buf9, buf4, buf1, buf3, buf6, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf3 del buf4 del buf6 del buf7 return buf9, class HSIC(nn.Module): """Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC) .. math:: HSIC (X, Y) := || C_{x, y} ||^2_{HS}, where HSIC (X, Y) = 0 iff X and Y are independent. Empirically, we use the finite sample estimator of HSIC (with m observations) given by either (1) biased estimator (HSIC_0) Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005. :math:`(m - 1)^{-2} tr(KHLH)`. where K_{ij} = kernel_x (x_i, x_j), L_{ij} = kernel_y (y_i, y_j), H = I - m^{-1} 1 1^\\top (Hence, K, L, H are m by m matrices). (2) unbiased estimator (HSIC_1) Song, Le, et al. "Feature selection via dependence maximization." 2012. :math:`\\frac{1}{m (m - 3)} \\bigg[ tr(\\tilde K \\tilde L) + \\frac{1^\\top \\tilde K 1 1^\\top \\tilde L 1}{(m-1)(m-2)} - \\frac{2}{m-2} 1^\\top \\tilde K \\tilde L 1 \\bigg]`. where \\tilde K and \\tilde L are obtained from K and L by setting the diagonal entries \\tilde K_{ii} and \\tilde L_{ii} to zero. Parameters ---------- sigma_x : float the kernel size of the kernel function for X. sigma_y : float the kernel size of the kernel function for Y. algorithm: str ('unbiased' / 'biased') the algorithm for the finite sample estimator. 'unbiased' is used for our paper. reduction: not used (for compatibility with other losses). """ def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased', reduction=None): super(HSIC, self).__init__() if sigma_y is None: sigma_y = sigma_x self.sigma_x = sigma_x self.sigma_y = sigma_y if algorithm == 'biased': self.estimator = self.biased_estimator elif algorithm == 'unbiased': self.estimator = self.unbiased_estimator else: raise ValueError('invalid estimator: {}'.format(algorithm)) def _kernel_x(self, X): raise NotImplementedError def _kernel_y(self, Y): raise NotImplementedError def biased_estimator(self, input1, input2): """Biased estimator of Hilbert-Schmidt Independence Criterion Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005. """ K = self._kernel_x(input1) L = self._kernel_y(input2) KH = K - K.mean(0, keepdim=True) LH = L - L.mean(0, keepdim=True) N = len(input1) return torch.trace(KH @ LH / (N - 1) ** 2) def unbiased_estimator(self, input1, input2): """Unbiased estimator of Hilbert-Schmidt Independence Criterion Song, Le, et al. "Feature selection via dependence maximization." 2012.
""" kernel_XX = self._kernel_x(input1) kernel_YY = self._kernel_y(input2) tK = kernel_XX - torch.diag(kernel_XX) tL = kernel_YY - torch.diag(kernel_YY) N = len(input1) hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1 ) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2) return hsic / (N * (N - 3)) def forward(self, input1, input2, **kwargs): return self.estimator(input1, input2) class RbfHSIC(HSIC): """Radial Basis Function (RBF) kernel HSIC implementation. """ def _kernel(self, X, sigma): X = X.view(len(X), -1) XX = X @ X.t() X_sqnorms = torch.diag(XX) X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0) gamma = 1 / (2 * sigma ** 2) kernel_XX = torch.exp(-gamma * X_L2) return kernel_XX def _kernel_x(self, X): return self._kernel(X, self.sigma_x) def _kernel_y(self, Y): return self._kernel(Y, self.sigma_y) class MinusRbfHSICNew(RbfHSIC): """``Minus'' RbfHSIC for the ``max'' optimization. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
derwind/mxfont
MinusRbfHSIC
false
10132
[ "MIT" ]
0
0b6d4554a1e2208906230d3121d792d450ed28dd
https://github.com/derwind/mxfont/tree/0b6d4554a1e2208906230d3121d792d450ed28dd
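One detail worth calling out in the MinusRbfHSIC entry: with sigma_x = 4 from get_init_inputs, the RBF bandwidth gamma = 1 / (2 * sigma**2) = 0.03125, which is exactly the -0.03125 multiplier constant-folded into the first fused kernel; likewise the second kernel bakes N = 4 into the constants 0.3333..., 0.5 and 0.25 of the unbiased estimator (1/(N-1), 1/(N-2) and 1/(N*(N-3))). A minimal eager sketch of the kernel-matrix computation follows; the helper name rbf_kernel is ours:

import torch

def rbf_kernel(X: torch.Tensor, sigma: float = 4.0) -> torch.Tensor:
    X = X.view(len(X), -1)                             # (N, D), N = 4 here
    XX = X @ X.t()
    sq = torch.diag(XX)
    d2 = -2 * XX + sq.unsqueeze(1) + sq.unsqueeze(0)   # pairwise squared distances
    return torch.exp(-d2 / (2 * sigma ** 2))           # gamma = 0.03125 for sigma = 4

K = rbf_kernel(torch.rand(4, 4, 4, 4))
# Zero distance on the diagonal, so K_ii == 1 up to rounding.
assert torch.allclose(torch.diag(K), torch.ones(4))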
BasicCNN2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3r/c3rslv2exiiurkywxt4oxifqli4zlmoxh26mfj7b2xmxbacdj7n6.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ld/cldjmvv4msesfeqlj3jxelxyzs2cjiixhopwrnnctlh6upd2jdua.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wv/cwvgka2tdnkfhotjblshnd7peeqx5dbyqvmgelrsa445t7sdxarg.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oa/coa3vdd4jeg5klv6wypjdd2yt36xodp6tqonllq54rha7343zhle.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gh/cgh65wxy6qy3yx3pt7lwzfms6zxyocfnvufux6hdxgvqrt6ux4rc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sl/csl2ee26agnux54v4a3w3vnx6hxiwq2j3vcyhgkdarkoqfzf63ga.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/mj/cmjbcbqfnfsu3y2kncps6yei6kzbizdwciwacwy636iaqhj7ayip.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4c/c4c3knz3pygroowt7w3yxro5taa6upjqk446dr7qzq7kfzi25as3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) % 32 x2 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (64*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2080 + x0 + (64*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gy/cgybgxxsr64qz6vxobnj6ehqtct5e3yxnbggqymt7ujvkqngg5ae.py # Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2o/c2ooxr73bx6syvyjvoumzhe7bf3oexfie5t7casmohxvvsq5qvc5.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_4 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 16 x2 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (128*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + (128*x1) + (4096*x2)), None) tmp2 = 
triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/23/c237qe7562aiyk4ns62dlzt5mkc5j32w73ie5vxeytrodt2lrzyw.py # Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jd/cjdwlnz7nsod5hoavyr7fqiwhaui4ldgqmzadkpskebf6prqwoll.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_7 => _low_memory_max_pool2d_with_offsets_2, getitem_5 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = 
call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_5, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = (yindex // 8) y5 = yindex y4 = (yindex // 64) y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2048 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2176 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (64*x2) + (8192*y4)), tmp16, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cf/ccf5s7sdehofgkyxk2rmhskkp3xi6wgp4g7g3fwys4qb6m24ioq6.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu] # Source node to 
ATen node mapping: # x_10 => relu_6 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (128, 2048), (2048, 1)) assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (10, 128), (128, 1)) assert_size_stride(primals_17, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 96, 9, grid=grid(96, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 1024, 9, grid=grid(1024, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 2048, 9, grid=grid(2048, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_12, buf6, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_12 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf8, primals_2, 524288, grid=grid(524288), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 524288, grid=grid(524288), stream=stream0) del primals_5 buf11 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32) buf12 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 262144, grid=grid(262144), 
stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf16, primals_9, 262144, grid=grid(262144), stream=stream0) del primals_9 buf17 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf18 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_10.run(buf16, buf17, buf18, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf17, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf20 = buf19; del buf19 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf20, primals_11, 131072, grid=grid(131072), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf22 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [conv2d_5, relu_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf22, primals_13, 131072, grid=grid(131072), stream=stream0) del primals_13 buf23 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) buf24 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_12.run(buf22, buf23, buf24, 256, 128, grid=grid(256, 128), stream=stream0) buf25 = empty_strided_cuda((16, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_14, (2048, 128), (1, 2048), 0), out=buf25) buf26 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu] triton_poi_fused_relu_13.run(buf26, primals_15, 2048, grid=grid(2048), stream=stream0) del primals_15 buf27 = empty_strided_cuda((16, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.addmm] extern_kernels.addmm(primals_17, buf26, reinterpret_tensor(primals_16, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf27) del primals_17 return (buf27, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf11, buf12, buf14, buf16, buf17, buf18, buf20, buf22, buf23, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), buf26, primals_16, primals_14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
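# Editor's illustrative sketch (not part of the generated output above): the
# triton_poi_fused_0..6 kernels are pure layout transforms -- they repack the
# contiguous NCHW conv weights and the input into channels-last strides before
# the convolutions run. A minimal eager-mode equivalent for the first weight:
import torch
w = torch.randn(32, 3, 3, 3)
w_cl = w.contiguous(memory_format=torch.channels_last)
assert w_cl.stride() == (27, 1, 9, 3)  # same strides as buf0 above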
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicCNN2(nn.Module):

    def __init__(self):
        super().__init__()
        self.layer_names = ['conv11', 'conv12', 'conv21', 'conv22',
            'conv31', 'conv32', 'fc1', 'output_layer']
        self.conv11 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv12 = nn.Conv2d(32, 32, 3, padding=1)
        self.conv21 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv22 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv31 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv32 = nn.Conv2d(128, 128, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(4 * 4 * 128, 128)
        self.output_layer = nn.Linear(128, 10)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        x = F.relu(self.conv11(x))
        x = self.pool(F.relu(self.conv12(x)))
        x = self.dropout(x)
        x = F.relu(self.conv21(x))
        x = self.pool(F.relu(self.conv22(x)))
        x = self.dropout(x)
        x = F.relu(self.conv31(x))
        x = self.pool(F.relu(self.conv32(x)))
        x = self.dropout(x)
        x = x.view(-1, 4 * 4 * 128)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.output_layer(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
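# Hedged usage sketch (added for illustration, not from the source repo): with
# the 64x64 sample input from get_inputs(), three 2x2 pools leave an 8x8x128
# feature map, so the hard-coded view(-1, 4 * 4 * 128) reflattens the batch of
# 4 into 16 rows and the logits come out as (16, 10), matching buf27 above.
model = BasicCNN2()
model.eval()  # dropout becomes identity, mirroring the compiled inference graph
with torch.no_grad():
    logits = model(get_inputs()[0])
assert logits.shape == (16, 10)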
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 % 32 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 64 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2080 + x0 + 64 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 16 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), 
None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = yindex // 8 y5 = yindex y4 = yindex // 64 y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2048 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2176 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 64 * x2 + 8192 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), 
(288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 2048), (2048, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (10, 128), (128, 1)) assert_size_stride(primals_17, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_2[grid(1024, 9)](primals_4, buf2, 1024, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_3[grid(2048, 9)](primals_6, buf3, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_4[grid(4096, 9)](primals_8, buf4, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_5[grid(8192, 9)](primals_10, buf5, 8192, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_6[grid(16384, 9)](primals_12, buf6, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_7[grid(524288)](buf8, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf9 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(524288)](buf10, primals_5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf11 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32) buf12 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(131072)](buf10, buf11, buf12, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf14 = buf13 del buf13 triton_poi_fused_convolution_relu_9[grid(262144)](buf14, primals_7, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_9[grid(262144)](buf16, primals_9, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf17 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf18 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(65536)](buf16, buf17, buf18, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf17, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf20 = buf19 del buf19 triton_poi_fused_convolution_relu_11[grid(131072)](buf20, primals_11, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf22 = buf21 del buf21 triton_poi_fused_convolution_relu_11[grid(131072)](buf22, primals_13, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf23 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) buf24 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_12[grid(256, 128)](buf22, buf23, buf24, 256, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf25 = empty_strided_cuda((16, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_14, (2048, 128), (1, 2048), 0), out=buf25) buf26 = buf25 del buf25 triton_poi_fused_relu_13[grid(2048)](buf26, primals_15, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf27 = empty_strided_cuda((16, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_17, buf26, reinterpret_tensor( primals_16, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf27) del primals_17 return (buf27, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf11, buf12, buf14, buf16, buf17, buf18, buf20, buf22, buf23, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), buf26, primals_16, primals_14) class BasicCNN2New(nn.Module): def __init__(self): super().__init__() self.layer_names = ['conv11', 'conv12', 'conv21', 'conv22', 'conv31', 'conv32', 'fc1', 'output_layer'] self.conv11 = nn.Conv2d(3, 32, 3, padding=1) self.conv12 = nn.Conv2d(32, 32, 3, padding=1) self.conv21 = nn.Conv2d(32, 64, 3, padding=1) self.conv22 = nn.Conv2d(64, 64, 3, padding=1) self.conv31 = nn.Conv2d(64, 128, 3, padding=1) self.conv32 = nn.Conv2d(128, 128, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(4 * 4 * 128, 128) self.output_layer = nn.Linear(128, 10) self.dropout = nn.Dropout(0.2) def forward(self, input_0): primals_1 = self.conv11.weight primals_2 = self.conv11.bias primals_4 = self.conv12.weight primals_5 = self.conv12.bias primals_6 = self.conv21.weight primals_7 = self.conv21.bias primals_8 = self.conv22.weight primals_9 = self.conv22.bias primals_10 = self.conv31.weight primals_11 = self.conv31.bias primals_12 = self.conv32.weight primals_13 = self.conv32.bias primals_14 = self.fc1.weight primals_15 = self.fc1.bias primals_16 = self.output_layer.weight primals_17 = self.output_layer.bias primals_3 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
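# Hedged equivalence sketch (editor's addition; assumes both class definitions
# above are importable together and that a CUDA device is present, since call()
# launches the Triton kernels): with shared weights and dropout inactive in
# eval mode, the eager module and the Inductor-compiled wrapper should agree.
if torch.cuda.is_available():
    x = torch.rand(4, 3, 64, 64, device='cuda')
    eager = BasicCNN2().cuda().eval()
    compiled = BasicCNN2New().cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    with torch.no_grad():
        torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)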
fnc11/CosDefence
BasicCNN2
false
10133
[ "MIT" ]
0
94f451b7d4b36cb3b9fcc85098dae242f311532b
https://github.com/fnc11/CosDefence/tree/94f451b7d4b36cb3b9fcc85098dae242f311532b
SimpleNeuralNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ul/culvxc5xcnacfjypzxghwcyc2445sqsz25ci4rib6axjxs3fv3so.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yr/cyr6fatjcqc5np3quy6arljtkkff4qjmueyb5b4pk5xvkxgrzuvd.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = 
(%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), 
alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) del buf3 return (buf4, primals_1, buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SimpleNeuralNet(nn.Module): def __init__(self, n_in, n_hidden, n_out): super().__init__() self.linear1 = nn.Linear(n_in, n_hidden) self.linear2 = nn.Linear(n_hidden, n_out) def forward(self, x): x = x.view(x.size(0), -1) x = F.relu(self.linear1(x)) return F.log_softmax(self.linear2(x), dim=-1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_in': 4, 'n_hidden': 4, 'n_out': 4}]
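# Editorial usage sketch (not from the source repo): exercising the eager module
# above with the sizes given by get_init_inputs(); the input values and target
# labels below are illustrative assumptions.
import torch
import torch.nn.functional as F

model = SimpleNeuralNet(n_in=4, n_hidden=4, n_out=4)
x = torch.rand(4, 4)
log_probs = model(x)  # shape (4, 4); each row holds log-probabilities
# log_softmax output exponentiates back to a proper distribution per row
assert torch.allclose(log_probs.exp().sum(dim=-1), torch.ones(4), atol=1e-5)
loss = F.nll_loss(log_probs, torch.tensor([0, 1, 2, 3]))  # NLL pairs with log_softmax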
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf3 return buf4, primals_1, buf1, buf4, primals_4 class SimpleNeuralNetNew(nn.Module): def __init__(self, n_in, n_hidden, n_out): super().__init__() self.linear1 = nn.Linear(n_in, n_hidden) self.linear2 = nn.Linear(n_hidden, n_out) def forward(self, input_0): primals_1 = input_0 primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
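# Editorial equivalence sketch (assumes a CUDA device, since call() pins device 0,
# and assumes both class definitions from this entry are in scope). With the
# primals mapping in SimpleNeuralNetNew.forward above (input first, then the
# layer parameters, matching the mm/addmm operands in call), sharing weights
# should reproduce the eager output.
import torch

model = SimpleNeuralNet(4, 4, 4).cuda()
fast = SimpleNeuralNetNew(4, 4, 4).cuda()
fast.load_state_dict(model.state_dict())
x = torch.rand(4, 4, device='cuda')
assert torch.allclose(model(x), fast(x), atol=1e-5)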
gjwgit/mnist
SimpleNeuralNet
false
10,134
[ "MIT" ]
0
77551a2600a3df06228546cfe6729df4803b6521
https://github.com/gjwgit/mnist/tree/77551a2600a3df06228546cfe6729df4803b6521
EMDLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/by/cbyierpm3pbebhkkiv5bu7g55geq4bts3id4zzn3makdomwbxyp7.py # Topologically Sorted Source Nodes: [cdf_estimate], Original ATen: [aten.cumsum] # Source node to ATen node mapping: # cdf_estimate => cumsum_1 # Graph fragment: # %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%arg1_1, 1), kwargs = {}) triton_per_fused_cumsum_0 = async_compile.triton('triton_per_fused_cumsum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset 
= 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0) tmp1 = tmp0.to(tl.float32) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/et/cet6qmj4sulda5ae5dgboczyvubmiljbh73b4gflcbuorop6iqqx.py # Topologically Sorted Source Nodes: [cdf_diff, abs_1, pow_1, mean, samplewise_emd, mean_1], Original ATen: [aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt] # Source node to ATen node mapping: # abs_1 => abs_1 # cdf_diff => sub # mean => mean # mean_1 => mean_1 # pow_1 => pow_1 # samplewise_emd => sqrt # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum_1, %cumsum), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1]), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {}) triton_per_fused_abs_mean_pow_sqrt_sub_1 = async_compile.triton('triton_per_fused_abs_mean_pow_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_pow_sqrt_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_pow_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 
= tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp6 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp11 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp17 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp18 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tl_math.abs(tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 - tmp18 tmp20 = tl_math.abs(tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 4.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = 64.0 tmp30 = tmp28 / tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp30, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cdf_estimate], Original ATen: [aten.cumsum] stream0 = get_raw_stream(0) triton_per_fused_cumsum_0.run(arg1_1, buf0, 64, 4, grid=grid(64), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cdf_target], Original ATen: [aten.cumsum] triton_per_fused_cumsum_0.run(arg0_1, buf1, 64, 4, grid=grid(64), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [cdf_diff, abs_1, pow_1, mean, samplewise_emd, mean_1], Original ATen: [aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt] triton_per_fused_abs_mean_pow_sqrt_sub_1.run(buf3, buf0, buf1, 1, 64, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class EMDLoss(nn.Module): """EMDLoss class """ def __init__(self): super(EMDLoss, self).__init__() def forward(self, p_pred: 'torch.Tensor', p_true: 'torch.Tensor'): assert p_true.shape == p_pred.shape, 'Shape of the two distributions must be the same' cdf_target = torch.cumsum(p_true, dim=1) cdf_estimate = torch.cumsum(p_pred, dim=1) cdf_diff = cdf_estimate - cdf_target samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff ), 2), dim=1)) return samplewise_emd.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
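# Editorial numeric sketch (not from the source repo): identical distributions
# have identical CDFs, so the loss is exactly zero; rolling probability mass
# along dim 1 makes it strictly positive.
import torch

criterion = EMDLoss()
p = torch.softmax(torch.rand(4, 4, 4, 4), dim=1)  # dim 1 holds each distribution
assert criterion(p, p).item() == 0.0
q = p.roll(shifts=1, dims=1)  # shift probability mass by one bin
assert criterion(q, p).item() > 0.0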
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl .constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tmp0.to(tl.float32) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp3, xmask) @triton.jit def triton_per_fused_abs_mean_pow_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp6 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp17 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp18 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tl_math.abs(tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 - tmp18 tmp20 = tl_math.abs(tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 4.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = 64.0 tmp30 = tmp28 / tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cumsum_0[grid(64)](arg1_1, buf0, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_cumsum_0[grid(64)](arg0_1, buf1, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_abs_mean_pow_sqrt_sub_1[grid(1)](buf3, buf0, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, class EMDLossNew(nn.Module): """EMDLoss class """ def __init__(self): super(EMDLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) 
return output[0]
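# Editorial equivalence sketch (assumes a CUDA device and the (4, 4, 4, 4)
# shape the kernels above are specialized to).
import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(EMDLoss()(a, b), EMDLossNew()(a, b), atol=1e-5)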
groundzhou/Image-aesthetic-assesment
EMDLoss
false
10,135
[ "MIT" ]
0
0b22f60cdae11650153027c768a6a488b02ff9e4
https://github.com/groundzhou/Image-aesthetic-assesment/tree/0b22f60cdae11650153027c768a6a488b02ff9e4
CorrelationAlignmentLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/i2/ci2cwyw4jxpalwmyllccuq6a5yjb26kh7daxxjmn3axgrehmtnb3.py # Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff], Original ATen: [aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # mean_diff => mean_2 # mean_s => mean # mean_t => mean_1 # pow_1 => pow_1 # sub_2 => sub_2 # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [0], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mean_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused_mean_pow_sub_0 = async_compile.triton('triton_per_fused_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp9 = tl.load(in_ptr1 + (r0), None) tmp10 = tl.load(in_ptr1 + (4 + r0), None) tmp12 = tl.load(in_ptr1 + (8 + r0), None) tmp14 = tl.load(in_ptr1 + (12 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xu/cxuzmvlqkyjljbyjwrwylrfxhfwrjpbsfgbjary2lrutdlnkenqz.py # Topologically Sorted Source Nodes: [mean_s, cent_s], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # cent_s => sub # mean_s => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) triton_poi_fused_mean_sub_1 = async_compile.triton('triton_poi_fused_mean_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = 
tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7y/c7y3bczxpszkpgidavo5bb3dnvtxg5td2yb7lhaxaq6gi7vwtkbu.py # Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff, cov_s, cov_t, sub_3, pow_2, cov_diff, add], Original ATen: [aten.mean, aten.sub, aten.pow, aten.div, aten.add] # Source node to ATen node mapping: # add => add # cov_diff => mean_3 # cov_s => div # cov_t => div_1 # mean_diff => mean_2 # mean_s => mean # mean_t => mean_1 # pow_1 => pow_1 # pow_2 => pow_2 # sub_2 => sub_2 # sub_3 => sub_3 # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [0], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mean_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, 3), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_1, 3), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_2, %mean_3), kwargs = {}) triton_per_fused_add_div_mean_pow_sub_2 = async_compile.triton('triton_per_fused_add_div_mean_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_pow_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp10 = tl.load(in_out_ptr0 + (0)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1]) tmp1 = 0.3333333333333333 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 16.0 tmp15 = tmp9 / tmp14 tmp16 = tmp13 + tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff], Original ATen: [aten.mean, aten.sub, aten.pow] stream0 = get_raw_stream(0) triton_per_fused_mean_pow_sub_0.run(arg0_1, arg1_1, buf0, 1, 4, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean_s, cent_s], Original ATen: [aten.mean, aten.sub] triton_poi_fused_mean_sub_1.run(arg0_1, buf1, 16, grid=grid(16), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (1, 4), 0), buf1, out=buf2) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mean_t, cent_t], Original ATen: [aten.mean, aten.sub] triton_poi_fused_mean_sub_1.run(arg1_1, buf3, 16, grid=grid(16), stream=stream0) del arg1_1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0), buf3, out=buf4) del buf3 buf6 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff, cov_s, cov_t, sub_3, pow_2, cov_diff, add], Original ATen: [aten.mean, aten.sub, aten.pow, aten.div, aten.add] triton_per_fused_add_div_mean_pow_sub_2.run(buf6, buf2, buf4, 1, 16, grid=grid(1), stream=stream0) del buf2 del buf4 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class CorrelationAlignmentLoss(nn.Module): """The `Correlation Alignment Loss` in `Deep CORAL: Correlation Alignment for Deep Domain Adaptation (ECCV 2016) <https://arxiv.org/pdf/1607.01719.pdf>`_. Given source features :math:`f_S` and target features :math:`f_T`, the covariance matrices are given by .. math:: C_S = \\frac{1}{n_S-1}(f_S^Tf_S-\\frac{1}{n_S}(\\textbf{1}^Tf_S)^T(\\textbf{1}^Tf_S)) .. math:: C_T = \\frac{1}{n_T-1}(f_T^Tf_T-\\frac{1}{n_T}(\\textbf{1}^Tf_T)^T(\\textbf{1}^Tf_T)) where :math:`\\textbf{1}` denotes a column vector with all elements equal to 1, :math:`n_S, n_T` denotes number of source and target samples, respectively. We use :math:`d` to denote feature dimension, use :math:`{\\Vert\\cdot\\Vert}^2_F` to denote the squared matrix `Frobenius norm`. The correlation alignment loss is given by .. math:: l_{CORAL} = \\frac{1}{4d^2}\\Vert C_S-C_T \\Vert^2_F Inputs: - f_s (tensor): feature representations on source domain, :math:`f^s` - f_t (tensor): feature representations on target domain, :math:`f^t` Shape: - f_s, f_t: :math:`(N, d)` where d means the dimension of input features, :math:`N=n_S=n_T` is mini-batch size. - Outputs: scalar. """ def __init__(self): super(CorrelationAlignmentLoss, self).__init__() def forward(self, f_s: 'torch.Tensor', f_t: 'torch.Tensor') ->torch.Tensor: mean_s = f_s.mean(0, keepdim=True) mean_t = f_t.mean(0, keepdim=True) cent_s = f_s - mean_s cent_t = f_t - mean_t cov_s = torch.mm(cent_s.t(), cent_s) / (len(f_s) - 1) cov_t = torch.mm(cent_t.t(), cent_t) / (len(f_t) - 1) mean_diff = (mean_s - mean_t).pow(2).mean() cov_diff = (cov_s - cov_t).pow(2).mean() return mean_diff + cov_diff def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
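# Editorial usage sketch (not from the library's test suite): the random
# features below stand in for real source/target activations. Feeding the same
# batch twice gives exactly zero, since both the means and covariances coincide.
import torch

coral = CorrelationAlignmentLoss()
f_s, f_t = torch.rand(4, 4), torch.rand(4, 4)
print(coral(f_s, f_t))  # positive scalar for distinct batches
assert coral(f_s, f_s).item() == 0.0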
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp9 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_ptr1 + (4 + r0), None) tmp12 = tl.load(in_ptr1 + (8 + r0), None) tmp14 = tl.load(in_ptr1 + (12 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None) @triton.jit def triton_poi_fused_mean_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_per_fused_add_div_mean_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_out_ptr0 + 0) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1]) tmp1 = 0.3333333333333333 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 16.0 tmp15 = tmp9 / tmp14 tmp16 = tmp13 + tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_mean_pow_sub_0[grid(1)](arg0_1, arg1_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
triton_poi_fused_mean_sub_1[grid(16)](arg0_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (1, 4), 0), buf1, out=buf2) buf3 = buf1 del buf1 triton_poi_fused_mean_sub_1[grid(16)](arg1_1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0), buf3, out=buf4) del buf3 buf6 = buf0 del buf0 triton_per_fused_add_div_mean_pow_sub_2[grid(1)](buf6, buf2, buf4, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf4 return buf6, class CorrelationAlignmentLossNew(nn.Module): """The `Correlation Alignment Loss` in `Deep CORAL: Correlation Alignment for Deep Domain Adaptation (ECCV 2016) <https://arxiv.org/pdf/1607.01719.pdf>`_. Given source features :math:`f_S` and target features :math:`f_T`, the covariance matrices are given by .. math:: C_S = \\frac{1}{n_S-1}(f_S^Tf_S-\\frac{1}{n_S}(\\textbf{1}^Tf_S)^T(\\textbf{1}^Tf_S)) .. math:: C_T = \\frac{1}{n_T-1}(f_T^Tf_T-\\frac{1}{n_T}(\\textbf{1}^Tf_T)^T(\\textbf{1}^Tf_T)) where :math:`\\textbf{1}` denotes a column vector with all elements equal to 1, :math:`n_S, n_T` denotes number of source and target samples, respectively. We use :math:`d` to denote feature dimension, use :math:`{\\Vert\\cdot\\Vert}^2_F` to denote the squared matrix `Frobenius norm`. The correlation alignment loss is given by .. math:: l_{CORAL} = \\frac{1}{4d^2}\\Vert C_S-C_T \\Vert^2_F Inputs: - f_s (tensor): feature representations on source domain, :math:`f^s` - f_t (tensor): feature representations on target domain, :math:`f^t` Shape: - f_s, f_t: :math:`(N, d)` where d means the dimension of input features, :math:`N=n_S=n_T` is mini-batch size. - Outputs: scalar. """ def __init__(self): super(CorrelationAlignmentLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
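# Editorial equivalence sketch (assumes a CUDA device and the (4, 4) feature
# shape the kernels above are specialized to).
import torch

f_s = torch.rand(4, 4, device='cuda')
f_t = torch.rand(4, 4, device='cuda')
ref = CorrelationAlignmentLoss()(f_s, f_t)
out = CorrelationAlignmentLossNew()(f_s, f_t)
assert torch.allclose(ref, out, atol=1e-5)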
XianyuanLiu/Transfer-Learning-Library
CorrelationAlignmentLoss
false
10,136
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
GaussianKernel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xe/cxedqk5kxfk4utnfrixbnyqhrlbgj4i6sv6keatvftozhw66gz74.py # Topologically Sorted Source Nodes: [sub, pow_1, l2_distance_square, neg, mean, mul, mul_1, truediv, exp], Original ATen: [aten.sub, aten.pow, aten.sum, aten.neg, aten.mean, aten.mul, aten.div, aten.exp] # Source node to ATen node mapping: # exp => exp # l2_distance_square => sum_1 # mean => mean # mul => mul # mul_1 => mul_1 # neg => neg # pow_1 => pow_1 # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %mul_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) % 4 r2 = (rindex // 64) r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 256.0 tmp23 = tmp21 / tmp22 tmp24 = 1.0 tmp25 = tmp23 * tmp24 tmp26 = -tmp18 tmp27 = 2.0 tmp28 = tmp25 * tmp27 tmp29 = tmp26 / tmp28 tmp30 = tl_math.exp(tmp29) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp25, None) tl.store(out_ptr1 + (tl.broadcast_to(r3, [RBLOCK])), tmp30, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, l2_distance_square, neg, mean, mul, mul_1, truediv, exp], Original ATen: [aten.sub, aten.pow, aten.sum, aten.neg, aten.mean, aten.mul, aten.div, aten.exp] stream0 = get_raw_stream(0) triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0.run(buf2, arg0_1, buf3, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), 
(64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from typing import Optional import torch.utils.data class GaussianKernel(nn.Module): """Gaussian Kernel Matrix Gaussian Kernel k is defined by .. math:: k(x_1, x_2) = \\exp \\left( - \\dfrac{\\| x_1 - x_2 \\|^2}{2\\sigma^2} \\right) where :math:`x_1, x_2 \\in R^d` are 1-d tensors. Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),` .. math:: K(X)_{i,j} = k(x_i, x_j) Also by default, during training this layer keeps running estimates of the mean of L2 distances, which are then used to set the hyperparameter :math:`\\sigma`. Mathematically, the estimation is :math:`\\sigma^2 = \\dfrac{\\alpha}{n^2}\\sum_{i,j} \\| x_i - x_j \\|^2`. If :attr:`track_running_stats` is set to ``False``, this layer does not keep running estimates and uses a fixed :math:`\\sigma` instead. Args: sigma (float, optional): bandwidth :math:`\\sigma`. Default: None track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\\sigma^2`. Otherwise, it won't track such statistics and always uses a fixed :math:`\\sigma^2`. Default: ``True`` alpha (float, optional): :math:`\\alpha` which decides the magnitude of :math:`\\sigma^2` when track_running_stats is set to ``True`` Inputs: - X (tensor): input group :math:`X` Shape: - Inputs: :math:`(minibatch, F)` where F means the dimension of input features. - Outputs: :math:`(minibatch, minibatch)` """ def __init__(self, sigma: 'Optional[float]'=None, track_running_stats: 'Optional[bool]'=True, alpha: 'Optional[float]'=1.0): super(GaussianKernel, self).__init__() assert track_running_stats or sigma is not None self.sigma_square = torch.tensor(sigma * sigma ) if sigma is not None else None self.track_running_stats = track_running_stats self.alpha = alpha def forward(self, X: 'torch.Tensor') ->torch.Tensor: l2_distance_square = ((X.unsqueeze(0) - X.unsqueeze(1)) ** 2).sum(2) if self.track_running_stats: self.sigma_square = self.alpha * torch.mean(l2_distance_square. detach()) return torch.exp(-l2_distance_square / (2 * self.sigma_square)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
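# Editorial usage sketch (not from the source repo) for the 2-D case documented
# under Shape. With track_running_stats=True, sigma^2 is re-estimated from the
# batch itself, so no explicit bandwidth is needed; the sizes are illustrative.
import torch

kernel = GaussianKernel(track_running_stats=True, alpha=1.0)
X = torch.rand(8, 16)  # (minibatch, F)
K = kernel(X)          # (8, 8) Gaussian kernel matrix
assert torch.allclose(torch.diagonal(K), torch.ones(8))  # k(x, x) = exp(0) = 1
assert torch.allclose(K, K.t())  # symmetric by construction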
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from typing import Optional import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 % 4 r2 = rindex // 64 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 256.0 tmp23 = tmp21 / tmp22 tmp24 = 1.0 tmp25 = tmp23 * tmp24 tmp26 = -tmp18 tmp27 = 2.0 tmp28 = tmp25 * tmp27 tmp29 = tmp26 / tmp28 tmp30 = tl_math.exp(tmp29) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None) tl.store(out_ptr1 + tl.broadcast_to(r3, [RBLOCK]), tmp30, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0[grid(1)](buf2, arg0_1, buf3, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf3, buf2 class GaussianKernelNew(nn.Module): """Gaussian Kernel Matrix Gaussian Kernel k is defined by .. math:: k(x_1, x_2) = \\exp \\left( - \\dfrac{\\| x_1 - x_2 \\|^2}{2\\sigma^2} \\right) where :math:`x_1, x_2 \\in R^d` are 1-d tensors. Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),` .. math:: K(X)_{i,j} = k(x_i, x_j) Also by default, during training this layer keeps running estimates of the mean of L2 distances, which are then used to set hyperparameter :math:`\\sigma`. Mathematically, the estimation is :math:`\\sigma^2 = \\dfrac{\\alpha}{n^2}\\sum_{i,j} \\| x_i - x_j \\|^2`. If :attr:`track_running_stats` is set to ``False``, this layer then does not keep running estimates, and use a fixed :math:`\\sigma` instead. Args: sigma (float, optional): bandwidth :math:`\\sigma`. 
Default: None track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\\sigma^2`. Otherwise, it won't track such statistics and always uses fix :math:`\\sigma^2`. Default: ``True`` alpha (float, optional): :math:`\\alpha` which decides the magnitude of :math:`\\sigma^2` when track_running_stats is set to ``True`` Inputs: - X (tensor): input group :math:`X` Shape: - Inputs: :math:`(minibatch, F)` where F means the dimension of input features. - Outputs: :math:`(minibatch, minibatch)` """ def __init__(self, sigma: 'Optional[float]'=None, track_running_stats: 'Optional[bool]'=True, alpha: 'Optional[float]'=1.0): super(GaussianKernelNew, self).__init__() assert track_running_stats or sigma is not None self.sigma_square = torch.tensor(sigma * sigma ) if sigma is not None else None self.track_running_stats = track_running_stats self.alpha = alpha def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
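# Editorial equivalence sketch (assumes a CUDA device and the (4, 4, 4, 4)
# input shape the kernel above is specialized to). Note the compiled wrapper
# returns the kernel matrix but does not write the batch estimate of sigma^2
# back into self.sigma_square the way the eager module does.
import torch

X = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(GaussianKernel()(X), GaussianKernelNew()(X), atol=1e-5)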
XianyuanLiu/Transfer-Learning-Library
GaussianKernel
false
10,137
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
BatchSpectralShrinkage
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3s/c3skx4mn2m33smtk7yzu674msvwvmvura3rr2xlhz6y3zxy3e2j4.py # Topologically Sorted Source Nodes: [pow_1, result], Original ATen: [aten.pow, aten.add] # Source node to ATen node mapping: # pow_1 => pow_1 # result => add # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 0), kwargs = {}) triton_poi_fused_add_pow_0 = async_compile.triton('triton_poi_fused_add_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (3)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tmp1 * 
tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [svd], Original ATen: [aten._linalg_svd] buf0 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0)) del arg0_1 buf2 = buf0[1] del buf0 buf4 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_1, result], Original ATen: [aten.pow, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_pow_0.run(buf2, buf4, 1, grid=grid(1), stream=stream0) del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class BatchSpectralShrinkage(nn.Module): """ The regularization term in `Catastrophic Forgetting Meets Negative Transfer: Batch Spectral Shrinkage for Safe Transfer Learning (NIPS 2019) <https://proceedings.neurips.cc/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf>`_. The BSS regularization of feature matrix :math:`F` can be described as: .. math:: L_{bss}(F) = \\sum_{i=1}^{k} \\sigma_{-i}^2 , where :math:`k` is the number of singular values to be penalized, :math:`\\sigma_{-i}` is the :math:`i`-th smallest singular value of feature matrix :math:`F`. All the singular values of feature matrix :math:`F` are computed by `SVD`: .. math:: F = U\\Sigma V^T, where the main diagonal elements of the singular value matrix :math:`\\Sigma` is :math:`[\\sigma_1, \\sigma_2, ..., \\sigma_b]`. Args: k (int): The number of singular values to be penalized. Default: 1 Shape: - Input: :math:`(b, |\\mathcal{f}|)` where :math:`b` is the batch size and :math:`|\\mathcal{f}|` is feature dimension. - Output: scalar. """ def __init__(self, k=1): super(BatchSpectralShrinkage, self).__init__() self.k = k def forward(self, feature): result = 0 _u, s, _v = torch.svd(feature.t()) num = s.size(0) for i in range(self.k): result += torch.pow(s[num - 1 - i], 2) return result def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
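A minimal usage sketch for the BatchSpectralShrinkage module above; the feature tensor, its shape, and the svdvals cross-check are illustrative assumptions, not part of the original entry:
import torch

# Hypothetical example: penalize the single smallest singular value (k=1)
# of a (batch, feature_dim) feature matrix, as the module's forward does.
bss = BatchSpectralShrinkage(k=1)
features = torch.rand(4, 4)  # (b, |f|): batch of 4 feature vectors of dim 4
loss = bss(features)
# Cross-check: torch.linalg.svdvals returns singular values in descending
# order, so the last one is the smallest value that BSS squares for k=1.
sigma = torch.linalg.svdvals(features.t())
assert torch.allclose(loss, sigma[-1] ** 2, atol=1e-5)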
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 3) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0)) del arg0_1 buf2 = buf0[1] del buf0 buf4 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_poi_fused_add_pow_0[grid(1)](buf2, buf4, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf2 return buf4, class BatchSpectralShrinkageNew(nn.Module): """ The regularization term in `Catastrophic Forgetting Meets Negative Transfer: Batch Spectral Shrinkage for Safe Transfer Learning (NIPS 2019) <https://proceedings.neurips.cc/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf>`_. The BSS regularization of feature matrix :math:`F` can be described as: .. math:: L_{bss}(F) = \\sum_{i=1}^{k} \\sigma_{-i}^2 , where :math:`k` is the number of singular values to be penalized, :math:`\\sigma_{-i}` is the :math:`i`-th smallest singular value of feature matrix :math:`F`. All the singular values of feature matrix :math:`F` are computed by `SVD`: .. math:: F = U\\Sigma V^T, where the main diagonal elements of the singular value matrix :math:`\\Sigma` is :math:`[\\sigma_1, \\sigma_2, ..., \\sigma_b]`. Args: k (int): The number of singular values to be penalized. Default: 1 Shape: - Input: :math:`(b, |\\mathcal{f}|)` where :math:`b` is the batch size and :math:`|\\mathcal{f}|` is feature dimension. - Output: scalar. """ def __init__(self, k=1): super(BatchSpectralShrinkageNew, self).__init__() self.k = k def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
BatchSpectralShrinkage
false
10,138
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
BridgeFeatLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ds/cdswxvs3hoxarp72punvmpueo4aqrc4ih7ujshqdg4qhgexbwiig.py # Topologically Sorted Source Nodes: [dist_mixed], Original ATen: [aten.cat] # Source node to ATen node mapping: # dist_mixed => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sqrt, %sqrt_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 2 x0 = xindex % 16 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp18 + tmp22 tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = libdevice.sqrt(tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp4, tmp26, tmp27) tmp29 = tmp0 >= tmp3 tmp30 = tl.full([1], 2, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + (x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp34 tmp36 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tl.load(in_ptr2 + (16 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tmp36 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp35 + tmp39 tmp41 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr2 + (32 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 - tmp42 tmp44 = tmp43 * tmp43 tmp45 = tmp40 + tmp44 tmp46 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr2 + (48 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp48 tmp50 = tmp45 + tmp49 tmp51 = triton_helpers.maximum(tmp50, tmp24) tmp52 = libdevice.sqrt(tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp29, tmp52, tmp53) tmp55 = tl.where(tmp4, tmp28, tmp54) tl.store(out_ptr0 + (x3), tmp55, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ck/cckdt23kyogbyhrcnu7l23peqelhmrhx37dr5tgwdfuqdi7vissw.py # Topologically Sorted Source Nodes: [mul, lam_dist_mixed, loss], Original ATen: [aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # lam_dist_mixed => sum_3 # loss => mean # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg3_1, %cat), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_3,), kwargs = {}) triton_per_fused_mean_mul_sum_1 = async_compile.triton('triton_per_fused_mean_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (32*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (32*r1)), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + (32*r1)), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + (32*r1)), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 2, 4, 4), (32, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dist_mixed], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, arg1_1, arg2_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, lam_dist_mixed, loss], Original ATen: [aten.mul, aten.sum, aten.mean] triton_per_fused_mean_mul_sum_1.run(buf2, arg3_1, buf0, 1, 64, grid=grid(1), stream=stream0) del arg3_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class BridgeFeatLoss(nn.Module): """Bridge loss on feature space. """ def __init__(self): super(BridgeFeatLoss, self).__init__() def forward(self, f_s, f_t, f_mixed, lam): dist_mixed2s = ((f_mixed - f_s) ** 2).sum(1, keepdim=True) dist_mixed2t = ((f_mixed - f_t) ** 2).sum(1, keepdim=True) dist_mixed2s = dist_mixed2s.clamp(min=1e-12).sqrt() dist_mixed2t = dist_mixed2t.clamp(min=1e-12).sqrt() dist_mixed = torch.cat((dist_mixed2s, dist_mixed2t), 1) lam_dist_mixed = (lam * dist_mixed).sum(1, keepdim=True) loss = lam_dist_mixed.mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 2, 4, 4])] def get_init_inputs(): return [[], {}]
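A minimal usage sketch for BridgeFeatLoss above; the feature shapes and the simple 0.5/0.5 mixing are illustrative assumptions, not taken from the original repository:
import torch

loss_fn = BridgeFeatLoss()
f_s = torch.rand(4, 4, 4, 4)       # source-domain feature maps
f_t = torch.rand(4, 4, 4, 4)       # target-domain feature maps
f_mixed = 0.5 * f_s + 0.5 * f_t    # hypothetical mixed (bridge) features
# lam carries one weight per distance term (to source and to target),
# hence the size-2 channel dimension matching torch.cat(..., 1) in forward.
lam = torch.full((4, 2, 4, 4), 0.5)
loss = loss_fn(f_s, f_t, f_mixed, lam)  # scalar loss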
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp18 + tmp22 tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = libdevice.sqrt(tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp4, tmp26, tmp27) tmp29 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp32 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + (x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp34 tmp36 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tl.load(in_ptr2 + (16 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tmp36 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp35 + tmp39 tmp41 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr2 + (32 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 - tmp42 tmp44 = tmp43 * tmp43 tmp45 = tmp40 + tmp44 tmp46 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr2 + (48 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp48 tmp50 = tmp45 + tmp49 tmp51 = triton_helpers.maximum(tmp50, tmp24) tmp52 = libdevice.sqrt(tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp29, tmp52, tmp53) tmp55 = tl.where(tmp4, tmp28, tmp54) tl.store(out_ptr0 + x3, tmp55, xmask) @triton.jit def triton_per_fused_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = 
tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 32 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 32 * r1), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + 32 * r1), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + 32 * r1), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 2, 4, 4), (32, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](arg0_1, arg1_1, arg2_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_mean_mul_sum_1[grid(1)](buf2, arg3_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg3_1 del buf0 return buf2, class BridgeFeatLossNew(nn.Module): """Bridge loss on feature space. """ def __init__(self): super(BridgeFeatLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
BridgeFeatLoss
false
10,139
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
DivLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/tc/ctc25ciiogeyj3ememo4qh6yvwcievqofewsgojc4deqsei44rn4.py # Topologically Sorted Source Nodes: [mu, sub, pow_1, mean_1, clamp, std, sum_1, loss_std], Original ATen: [aten.mean, aten.sub, aten.pow, aten.clamp, aten.sqrt, aten.sum, aten.neg] # Source node to ATen node mapping: # clamp => clamp_min # loss_std => neg # mean_1 => mean_1 # mu => mean # pow_1 => pow_1 # std => sqrt # sub => sub # sum_1 => sum_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0], True), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mean_1, 1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sqrt,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0 = async_compile.triton('triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = triton_helpers.maximum(tmp20, tmp21) tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = -tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp27, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mu, sub, pow_1, mean_1, clamp, std, sum_1, loss_std], Original ATen: [aten.mean, aten.sub, aten.pow, aten.clamp, aten.sqrt, aten.sum, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class DivLoss(nn.Module): """Diversity loss, which is defined as negative of standard deviation. """ def __init__(self): super(DivLoss, self).__init__() def forward(self, lam): mu = lam.mean(0) std = ((lam - mu) ** 2).mean(0, keepdim=True).clamp(min=1e-12).sqrt() loss_std = -std.sum() return loss_std def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
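A minimal usage sketch for DivLoss above; the shape of lam is an illustrative assumption (any tensor whose dim 0 indexes samples works):
import torch

div = DivLoss()
lam = torch.rand(8, 3)  # e.g. 8 sampled mixing coefficients of dimension 3
loss = div(lam)         # negative (biased) per-dimension std, summed
# More spread-out lam values give a larger std and therefore a lower
# (more negative) loss, so minimizing this loss encourages diversity.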
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = triton_helpers.maximum(tmp20, tmp21) tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = -tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class DivLossNew(nn.Module): """Diversity loss, which is defined as negative of standard deviation. """ def __init__(self): super(DivLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
DivLoss
false
10,140
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
VanillaGenerativeAdversarialLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/dj/cdjw23gjsvidmi7xlhfpigbxfqor2vsiltadbvmh3w5h4gtvhk46.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # binary_cross_entropy_with_logits => abs_1, exp, full_default, full_default_1, log1p, mean, minimum, mul, neg, sub_1, sub_2 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %arg0_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 0.0 tmp2 = tmp1 * tmp0 tmp3 = triton_helpers.minimum(tmp1, tmp0) tmp4 = tl_math.abs(tmp0) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = libdevice.log1p(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp2 - tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class VanillaGenerativeAdversarialLoss(nn.Module): """ Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_ Args: reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'`` Inputs: - prediction (tensor): unnormalized discriminator predictions - real (bool): if the ground truth label is for real images or fake images. Default: true .. warning:: Do not use sigmoid as the last layer of Discriminator. """ def __init__(self, reduction='mean'): super(VanillaGenerativeAdversarialLoss, self).__init__() self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction) def forward(self, prediction, real=True): if real: label = torch.ones_like(prediction) else: label = torch.zeros_like(prediction) return self.bce_loss(prediction, label) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
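A minimal usage sketch for the vanilla GAN loss above; the logits tensor is an illustrative assumption (raw discriminator outputs, per the warning about not applying sigmoid):
import torch

adv = VanillaGenerativeAdversarialLoss()
logits = torch.randn(4, 1)               # unnormalized discriminator scores
d_loss_real = adv(logits, real=True)     # BCE-with-logits against all-ones
d_loss_fake = adv(logits, real=False)    # BCE-with-logits against all-zeros
d_loss = 0.5 * (d_loss_real + d_loss_fake)  # one common way to combine them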
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.0 tmp2 = tmp1 * tmp0 tmp3 = triton_helpers.minimum(tmp1, tmp0) tmp4 = tl_math.abs(tmp0) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = libdevice.log1p(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp2 - tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class VanillaGenerativeAdversarialLossNew(nn.Module): """ Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_ Args: reduction (str, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'`` Inputs: - prediction (tensor): unnormalized discriminator predictions - real (bool): if the ground truth label is for real images or fake images. Default: true .. warning:: Do not use sigmoid as the last layer of Discriminator. """ def __init__(self, reduction='mean'): super(VanillaGenerativeAdversarialLossNew, self).__init__() self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
VanillaGenerativeAdversarialLoss
false
10,141
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
Theta
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cv/ccvh5pcjc57oskvep5v2okylauufvnesitt2j6dbyvr6fh3vwuer.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul] # Source node to ATen node mapping: # output => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7k/c7kfhnpi6ogpuakmzhvlayoy3mb73znoruxx7bdkt5awlqvsi5js.py # 
Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.mul] # Source node to ATen node mapping: # autograd_function_apply => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import torch import torch.nn as nn from typing import Tuple from typing import Optional from typing import Any import torch.utils.data class GradientReverseFunction(Function): @staticmethod def forward(ctx: 'Any', input: 'torch.Tensor', coeff: 'Optional[float]'=1.0 ) ->torch.Tensor: ctx.coeff = coeff output = input * 1.0 return output @staticmethod def backward(ctx: 'Any', grad_output: 'torch.Tensor') ->Tuple[torch. Tensor, Any]: return grad_output.neg() * ctx.coeff, None class GradientReverseLayer(nn.Module): def __init__(self): super(GradientReverseLayer, self).__init__() def forward(self, *input): return GradientReverseFunction.apply(*input) class Theta(nn.Module): """ maximize loss with respect to :math:`\\theta`, minimize loss with respect to features """ def __init__(self, dim: 'int'): super(Theta, self).__init__() self.grl1 = GradientReverseLayer() self.grl2 = GradientReverseLayer() self.layer1 = nn.Linear(dim, dim) nn.init.eye_(self.layer1.weight) nn.init.zeros_(self.layer1.bias) def forward(self, features: 'torch.Tensor') ->torch.Tensor: features = self.grl1(features) return self.grl2(self.layer1(features)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
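A minimal sketch of the gradient-reversal behaviour of Theta above; the input shape and the backward checks are illustrative assumptions:
import torch

theta = Theta(dim=4)
x = torch.rand(2, 4, requires_grad=True)
theta(x).sum().backward()
# grl1 and grl2 each negate the gradient once on the way back, so the
# gradient reaching x is flipped twice and (with the identity-initialized
# layer) is all ones here, while the gradient on layer1's parameters
# passes through only grl2 and is reversed once, which is what lets
# theta be updated in the direction that maximizes the loss.
print(x.grad)                          # tensor of ones for the eye-init layer
print(theta.layer1.weight.grad.sum())  # sign-reversed parameter gradient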
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import torch.nn as nn from typing import Tuple from typing import Optional from typing import Any import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class GradientReverseFunction(Function): @staticmethod def forward(ctx: 'Any', input: 'torch.Tensor', coeff: 'Optional[float]'=1.0 ) ->torch.Tensor: ctx.coeff = coeff output = input * 1.0 return output @staticmethod def backward(ctx: 'Any', grad_output: 'torch.Tensor') ->Tuple[torch. Tensor, Any]: return grad_output.neg() * ctx.coeff, None class GradientReverseLayer(nn.Module): def __init__(self): super(GradientReverseLayer, self).__init__() def forward(self, *input): return GradientReverseFunction.apply(*input) class ThetaNew(nn.Module): """ maximize loss with respect to :math:`\\theta`, minimize loss with respect to features """ def __init__(self, dim: 'int'): super(ThetaNew, self).__init__() self.grl1 = GradientReverseLayer() self.grl2 = GradientReverseLayer() self.layer1 = nn.Linear(dim, dim) nn.init.eye_(self.layer1.weight) nn.init.zeros_(self.layer1.bias) def forward(self, input_0): primals_2 = self.layer1.weight primals_3 = self.layer1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
XianyuanLiu/Transfer-Learning-Library
Theta
false
10,142
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
LeastSquaresGenerativeAdversarialLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fn/cfnan4prpp2nynvmknv62m7csvqnx5mzj3pgx6jhbs444xjvww6z.py # Topologically Sorted Source Nodes: [label, mse_loss], Original ATen: [aten.ones_like, aten.mse_loss] # Source node to ATen node mapping: # label => full_default # mse_loss => mean, pow_1, sub # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused_mse_loss_ones_like_0 = async_compile.triton('triton_per_fused_mse_loss_ones_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_ones_like_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_ones_like_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [label, mse_loss], Original ATen: [aten.ones_like, aten.mse_loss] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_ones_like_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class LeastSquaresGenerativeAdversarialLoss(nn.Module):
    """
    Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of elements in the output,
            ``'sum'``: the output will be summed. Default: ``'mean'``

    Inputs:
        - prediction (tensor): unnormalized discriminator predictions
        - real (bool): whether the ground truth label is for real images (``True``)
          or fake images (``False``). Default: ``True``

    .. warning::
        Do not use sigmoid as the last layer of Discriminator.
    """

    def __init__(self, reduction='mean'):
        super(LeastSquaresGenerativeAdversarialLoss, self).__init__()
        self.mse_loss = nn.MSELoss(reduction=reduction)

    def forward(self, prediction, real=True):
        if real:
            label = torch.ones_like(prediction)
        else:
            label = torch.zeros_like(prediction)
        return self.mse_loss(prediction, label)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mse_loss_ones_like_0(in_out_ptr0, in_ptr0, xnumel,
                                          rnumel):
    # Single persistent program: reduces all 256 elements of the (4, 4, 4, 4)
    # input at once, computing mean((x - 1.0) ** 2), i.e. the MSE against an
    # implicit all-ones label fused into the kernel.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mse_loss_ones_like_0[grid(1)](buf1, arg0_1, 1,
            256, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class LeastSquaresGenerativeAdversarialLossNew(nn.Module):
    """
    Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of elements in the output,
            ``'sum'``: the output will be summed. Default: ``'mean'``

    Inputs:
        - prediction (tensor): unnormalized discriminator predictions
        - real (bool): whether the ground truth label is for real images (``True``)
          or fake images (``False``). Default: ``True``

    .. warning::
        Do not use sigmoid as the last layer of Discriminator.
    """

    def __init__(self, reduction='mean'):
        super(LeastSquaresGenerativeAdversarialLossNew, self).__init__()
        self.mse_loss = nn.MSELoss(reduction=reduction)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
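A minimal usage sketch (not part of the dataset record; it assumes both class definitions above are in one scope and a CUDA device is available, since `call` asserts a contiguous (4, 4, 4, 4) CUDA tensor). It only exercises the `real=True` branch, because the generated kernel hardcodes the all-ones label and the 1/256 mean:

import torch

# Hypothetical smoke test: the compiled path should match the eager module.
if torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    eager = LeastSquaresGenerativeAdversarialLoss()(pred, real=True)
    compiled = LeastSquaresGenerativeAdversarialLossNew()(pred)
    torch.testing.assert_close(eager, compiled)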
XianyuanLiu/Transfer-Learning-Library
LeastSquaresGenerativeAdversarialLoss
false
10,143
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
BatchSpectralPenalizationLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3r/c3rbz7dw35fg7j6ysnzuk22mifl2txzhu2tdvutfxll4lensmwuh.py # Topologically Sorted Source Nodes: [pow_1, pow_2, loss], Original ATen: [aten.pow, aten.add] # Source node to ATen node mapping: # loss => add # pow_1 => pow_1 # pow_2 => pow_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {}) triton_poi_fused_add_pow_0 = async_compile.triton('triton_poi_fused_add_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
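    # Each program handles one 16-element tile: it squares both
    # singular-value buffers elementwise and sums them, matching
    # loss = s_s ** 2 + s_t ** 2 in the eager module.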
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [svd], Original ATen: [aten._linalg_svd] buf0 = torch.ops.aten._linalg_svd.default(arg0_1) del arg0_1 buf2 = buf0[1] del buf0 # Topologically Sorted Source Nodes: [svd_1], Original ATen: [aten._linalg_svd] buf4 = torch.ops.aten._linalg_svd.default(arg1_1) del arg1_1 buf6 = buf4[1] del buf4 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, pow_2, loss], Original ATen: [aten.pow, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_pow_0.run(buf2, buf6, buf8, 16, grid=grid(16), stream=stream0) del buf2 del buf6 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class BatchSpectralPenalizationLoss(nn.Module):
    """Batch spectral penalization loss from `Transferability vs. Discriminability: Batch
    Spectral Penalization for Adversarial Domain Adaptation (ICML 2019)
    <http://ise.thss.tsinghua.edu.cn/~mlong/doc/batch-spectral-penalization-icml19.pdf>`_.

    Given source features :math:`f_s` and target features :math:`f_t` in current mini batch, singular value
    decomposition is first performed

    .. math::
        f_s = U_s\\Sigma_sV_s^T

    .. math::
        f_t = U_t\\Sigma_tV_t^T

    Then batch spectral penalization loss is calculated as

    .. math::
        loss=\\sum_{i=1}^k(\\sigma_{s,i}^2+\\sigma_{t,i}^2)

    where :math:`\\sigma_{s,i},\\sigma_{t,i}` refer to the :math:`i`-th largest singular value of source features
    and target features respectively. We empirically set :math:`k=1`.

    Inputs:
        - f_s (tensor): feature representations on source domain, :math:`f^s`
        - f_t (tensor): feature representations on target domain, :math:`f^t`

    Shape:
        - f_s, f_t: :math:`(N, F)` where F means the dimension of input features.
        - Outputs: scalar.
    """

    def __init__(self):
        super(BatchSpectralPenalizationLoss, self).__init__()

    def forward(self, f_s, f_t):
        _, s_s, _ = torch.svd(f_s)
        _, s_t, _ = torch.svd(f_t)
        loss = torch.pow(s_s[0], 2) + torch.pow(s_t[0], 2)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Elementwise: out = a ** 2 + b ** 2 over the 16 entries of the leading
    # (4, 4) slice of each singular-value tensor (s[0] in the eager code).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # The SVDs stay as ATen fallbacks; only the final add of squared
        # singular values is lowered to the Triton kernel above.
        buf0 = torch.ops.aten._linalg_svd.default(arg0_1)
        del arg0_1
        buf2 = buf0[1]
        del buf0
        buf4 = torch.ops.aten._linalg_svd.default(arg1_1)
        del arg1_1
        buf6 = buf4[1]
        del buf4
        buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_pow_0[grid(16)](buf2, buf6, buf8, 16, XBLOCK=
            16, num_warps=1, num_stages=1)
        del buf2
        del buf6
    return buf8,


class BatchSpectralPenalizationLossNew(nn.Module):
    """Batch spectral penalization loss from `Transferability vs. Discriminability: Batch
    Spectral Penalization for Adversarial Domain Adaptation (ICML 2019)
    <http://ise.thss.tsinghua.edu.cn/~mlong/doc/batch-spectral-penalization-icml19.pdf>`_.

    Given source features :math:`f_s` and target features :math:`f_t` in current mini batch, singular value
    decomposition is first performed

    .. math::
        f_s = U_s\\Sigma_sV_s^T

    .. math::
        f_t = U_t\\Sigma_tV_t^T

    Then batch spectral penalization loss is calculated as

    .. math::
        loss=\\sum_{i=1}^k(\\sigma_{s,i}^2+\\sigma_{t,i}^2)

    where :math:`\\sigma_{s,i},\\sigma_{t,i}` refer to the :math:`i`-th largest singular value of source features
    and target features respectively. We empirically set :math:`k=1`.

    Inputs:
        - f_s (tensor): feature representations on source domain, :math:`f^s`
        - f_t (tensor): feature representations on target domain, :math:`f^t`

    Shape:
        - f_s, f_t: :math:`(N, F)` where F means the dimension of input features.
        - Outputs: scalar.
    """

    def __init__(self):
        super(BatchSpectralPenalizationLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
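A quick sanity-check sketch (my addition, with hypothetical sizes): it uses the 2-D (N, F) shape the docstring documents, where `s[0]` really is the largest singular value and the loss is a scalar. Note the compiled `call` above instead fixes batched (4, 4, 4, 4) inputs, for which `s_s[0]` is a (4, 4) slice. `torch.linalg.svdvals` is the non-deprecated replacement for `torch.svd` when only singular values are needed; both return them in descending order:

import torch

f_s = torch.rand(32, 256)  # hypothetical (N, F) source features
f_t = torch.rand(32, 256)  # hypothetical (N, F) target features
loss = BatchSpectralPenalizationLoss()(f_s, f_t)

# Same quantity via the non-deprecated API.
ref = torch.linalg.svdvals(f_s)[0] ** 2 + torch.linalg.svdvals(f_t)[0] ** 2
torch.testing.assert_close(loss, ref)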
XianyuanLiu/Transfer-Learning-Library
BatchSpectralPenalizationLoss
false
10,144
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
Vgg16
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/f7/cf7tayhctr3m6ezk7xezotpdlc5h4drokdkbz4vy2pfkbdxnmn4q.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/rv/crv3uzu52jbc4u62gio2klk6cj5xhjt7yazr75tq67kvtteddsn5.py # Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # h => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ku/ckuscrbyawdttbdara4zmhmq3lgm6lvxmizlt7j4v446lfogr7ah.py # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_2 => getitem, getitem_1 # Graph 
fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 32 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ws/cwssgzseoqxwmttgkoxdmvdzcrtg4ars5flpnsa2at2qixzwygfj.py # Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # h_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) 
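# Note: this pattern repeats for every VGG conv stage below. The convolution
# itself runs as a separate ATen node (see the graph fragment above), and the
# Triton kernel only fuses the bias add with an in-place ReLU
# (tmp2 = conv_out + bias; max(0, tmp2)).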
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/a5/ca5aycvivtwycqu7yn2xzgnljbqetxezkymwgte32n4b4c3doezm.py # Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_5 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) % 16 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c3/cc36sjgk3au3ve2witr7srumjy6npsyym5bconvmq65prldokmso.py # Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # h_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/l4/cl4n5dxp5ry2ji6m3g5uyniuwrai22ts6qhsulpbeng2mhu4ibj7.py # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_9 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_14 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = (xindex // 256) % 8 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, 
tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bo/cbory36nvcjc37vmkyigprzjn5qrg2tdk4ivdkunxl3icdtgur5z.py # Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # h_10 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7j/c7jd3cm6j3ovynecovhl5prgqhcik5umc42dyk3cqoyl3ul6ahpm.py # Topologically Sorted Source Nodes: [conv2d_12, h_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_12 => convolution_12 # h_15 => relu_12 # Graph fragment: # %convolution_12 : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_12, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 64], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = (yindex // 512) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (32768*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (64*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (512*x2) + (32768*y1)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), 
(576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] 
triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_26 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf15, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [conv2d_1, h_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf17, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_10.run(buf17, buf18, buf19, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf21, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [conv2d_3, h_4], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_11.run(buf23, primals_9, 524288, grid=grid(524288), stream=stream0) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) # Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_12.run(buf23, buf24, buf25, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf27, primals_11, 262144, grid=grid(262144), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [conv2d_5, h_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf29, primals_13, 262144, grid=grid(262144), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [conv2d_6, h_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf31, primals_15, 262144, grid=grid(262144), stream=stream0) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_14.run(buf31, buf32, buf33, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf35, primals_17, 131072, grid=grid(131072), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [conv2d_8, h_11], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_15.run(buf37, primals_19, 131072, grid=grid(131072), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [conv2d_9, h_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf39, primals_21, 131072, grid=grid(131072), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = buf40; del buf40 # reuse # Topologically Sorted Source Nodes: [conv2d_10, h_13], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf41, primals_23, 131072, grid=grid(131072), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [conv2d_11, h_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf43, primals_25, 131072, grid=grid(131072), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) # Topologically Sorted Source Nodes: [conv2d_12, h_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_16.run(buf44, primals_27, buf45, buf46, 2048, 64, grid=grid(2048, 64), stream=stream0) del buf44 del primals_27 return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
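# --- Illustrative sketch (added for clarity; not part of the generated output) ---
# Every triton_poi_fused_convolution_relu_* kernel above implements the same
# fusion: the convolution itself is dispatched to cuDNN via
# extern_kernels.convolution with bias=None, and the pointwise Triton kernel
# then adds the per-channel bias and applies ReLU in place. The tensor names
# below are illustrative only; an eager-mode equivalent is:
import torch
import torch.nn.functional as F

def conv_relu_reference(x, weight, bias):
    y = F.conv2d(x, weight, bias=None, stride=1, padding=1)  # extern kernel step
    y += bias.view(1, -1, 1, 1)  # fused pointwise step 1: broadcast bias add
    return y.relu_()             # fused pointwise step 2: in-place ReLU

x, w, b = torch.randn(4, 3, 64, 64), torch.randn(64, 3, 3, 3), torch.randn(64)
out = conv_relu_reference(x, w, b)
assert out.shape == (4, 64, 64, 64) and bool((out >= 0).all())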
import torch from torch import nn import torch.nn.functional as F class Vgg16(nn.Module): def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, X): h = F.relu(self.conv1_1(X), inplace=True) h = F.relu(self.conv1_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv2_1(h), inplace=True) h = F.relu(self.conv2_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv3_1(h), inplace=True) h = F.relu(self.conv3_2(h), inplace=True) h = F.relu(self.conv3_3(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv4_1(h), inplace=True) h = F.relu(self.conv4_2(h), inplace=True) h = F.relu(self.conv4_3(h), inplace=True) h = F.relu(self.conv5_1(h), inplace=True) h = F.relu(self.conv5_2(h), inplace=True) h = F.relu(self.conv5_3(h), inplace=True) relu5_3 = h return relu5_3 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
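# --- Hedged usage sketch (added; assumes a CUDA build of PyTorch 2.x) ---
# Inductor output like the code above is typically obtained by compiling the
# eager module with torch.compile. The exact kernels emitted vary with the
# PyTorch version, GPU, and autotuning cache, so this only illustrates the
# workflow rather than reproducing the generated code byte-for-byte.
import torch

model = Vgg16().cuda().eval()
compiled = torch.compile(model)  # Inductor is the default backend
x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), model(x), rtol=1e-4, atol=1e-4)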
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 
3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17, buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf3, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23, buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_13[grid(262144)](buf27, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_13[grid(262144)](buf29, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_13[grid(262144)](buf31, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31, buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_15[grid(131072)](buf35, primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_15[grid(131072)](buf37, primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_15[grid(131072)](buf39, primals_21, 131072, XBLOCK=1024, num_warps=4, 
num_stages=1) del primals_21 buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_15[grid(131072)](buf41, primals_23, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_15[grid(131072)](buf43, primals_25, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_16[grid(2048, 64) ](buf44, primals_27, buf45, buf46, 2048, 64, XBLOCK=32, YBLOCK= 32, num_warps=4, num_stages=1) del buf44 del primals_27 return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46) class Vgg16New(nn.Module): def __init__(self): super(Vgg16New, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
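# --- Minimal reference sketch (added for clarity; not generated code) ---
# The triton_poi_fused_max_pool2d_with_indices_* kernels above each compute a
# 2x2/stride-2 max pool and additionally store an int8 code in {0, 1, 2, 3}
# recording which window element won; the strict '>' comparisons in the
# kernels resolve ties toward the earlier element. An eager equivalent:
import torch
import torch.nn.functional as F

def maxpool2x2_with_codes(x):
    n, c, h, w = x.shape
    win = x.unfold(2, 2, 2).unfold(3, 2, 2)     # (N, C, H/2, W/2, 2, 2)
    win = win.reshape(n, c, h // 2, w // 2, 4)  # row-major window order, as in the kernels
    vals, idx = win.max(dim=-1)
    return vals, idx.to(torch.int8)

x = torch.randn(4, 256, 16, 16)
vals, codes = maxpool2x2_with_codes(x)
torch.testing.assert_close(vals, F.max_pool2d(x, kernel_size=2, stride=2))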
chaitrasj/GAN-based-Visible-Thermal-Person-ReID
Vgg16
false
10145
[ "MIT" ]
0
8fd65ce3ab5403056fbe6e3574d1a7d02a315e62
https://github.com/chaitrasj/GAN-based-Visible-Thermal-Person-ReID/tree/8fd65ce3ab5403056fbe6e3574d1a7d02a315e62
SimpleNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/hr/chrb4easzia4dw753qymwrkwdvp5554y3k2pc6zt3dtjbxgoihzj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3h/c3h4j2ijkg2jblud277lnet2ev5gdk6lyerwb36365pdquknq43k.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_2 => tanh_2 # Graph fragment: # %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64), (64, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (1, 64), (64, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 
= get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 64), (1, 64), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf7, primals_9, 4096, grid=grid(4096), stream=stream0) del primals_9 buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf9) del primals_11 return (reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
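# --- Illustrative sketch (added; not part of the generated output) ---
# Both triton_poi_fused_tanh_* kernels fuse the tail of an nn.Linear layer:
# extern_kernels.mm performs the bias-less matmul, and the Triton kernel then
# adds the bias and applies tanh in place. Names below are illustrative only.
import torch
import torch.nn.functional as F

def linear_tanh_reference(x, weight, bias):
    y = x @ weight.t()  # extern_kernels.mm step (no bias)
    y += bias           # fused pointwise step 1: bias add
    return y.tanh_()    # fused pointwise step 2: in-place tanh

x, w, b = torch.randn(64, 4), torch.randn(128, 4), torch.randn(128)
torch.testing.assert_close(linear_tanh_reference(x, w, b),
                           torch.tanh(F.linear(x, w, b)))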
import torch
import torch.nn as nn


class SimpleNet(nn.Module):

    def __init__(self, ni):
        super().__init__()
        self.linear1 = nn.Linear(ni, 128)
        self.linear2 = nn.Linear(128, 128)
        self.linear3 = nn.Linear(128, 64)
        self.linear4 = nn.Linear(64, 64)
        self.linear5 = nn.Linear(64, 1)

    def forward(self, x):
        # torch.tanh replaces the deprecated F.tanh; the math is identical.
        x = torch.tanh(self.linear1(x))
        x = torch.tanh(self.linear2(x))
        x = torch.tanh(self.linear3(x))
        x = torch.tanh(self.linear4(x))
        x = self.linear5(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'ni': 4}]
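A minimal smoke test for the eager module above; this sketch is illustrative, not part of the recorded source, and simply exercises the shapes from get_inputs()/get_init_inputs():

# Hypothetical usage: nn.Linear acts on the last dimension, so a
# (4, 4, 4, 4) input maps to (4, 4, 4, 1) after the final layer.
import torch

net = SimpleNet(ni=4)
out = net(torch.rand([4, 4, 4, 4]))
assert out.shape == (4, 4, 4, 1)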
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64), (64, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (1, 64), (64, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(8192)](buf3, primals_5, 8192, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf4 triton_poi_fused_tanh_1[grid(4096)](buf5, primals_7, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 64), (1, 64), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 
64, 1), 0) del buf6 triton_poi_fused_tanh_1[grid(4096)](buf7, primals_9, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 1), (1, 64), 0 ), alpha=1, beta=1, out=buf9) del primals_11 return reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4 class SimpleNetNew(nn.Module): def __init__(self, ni): super().__init__() self.linear1 = nn.Linear(ni, 128) self.linear2 = nn.Linear(128, 128) self.linear3 = nn.Linear(128, 64) self.linear4 = nn.Linear(64, 64) self.linear5 = nn.Linear(64, 1) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_8 = self.linear4.weight primals_9 = self.linear4.bias primals_10 = self.linear5.weight primals_11 = self.linear5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
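Assuming the compiled wrapper above is importable alongside the eager SimpleNet, a hedged equivalence check would share the weights and compare outputs. CUDA is required because call() allocates every buffer on cuda:0; the tolerances are assumptions:

import torch

eager = SimpleNet(ni=4).cuda()
fused = SimpleNetNew(ni=4).cuda()
fused.load_state_dict(eager.state_dict())  # same weights in both paths
x = torch.rand([4, 4, 4, 4], device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-5, atol=1e-5)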
haakonrob/AI-Feynman
SimpleNet
false
10,146
[ "MIT" ]
0
445b68e9a260dcea67a94eed6e0aeb267f25d2ef
https://github.com/haakonrob/AI-Feynman/tree/445b68e9a260dcea67a94eed6e0aeb267f25d2ef
TripletLossXBM
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/vn/cvn7neyecxwucvxwix3iwvixzuf4n42a24h7glkmj7uusonposlh.py # Topologically Sorted Source Nodes: [add, mul, dist_mat, clamp, dist_mat_1, eq, identity_mat, sub_1, mul_1, add_1, sort, mul_2, add_2, sort_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.sort] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # clamp => clamp_min # dist_mat => sub # dist_mat_1 => sqrt # eq => eq # identity_mat => convert_element_type # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # sort => sort # sort_1 => sort_1 # sub_1 => sub_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %permute), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 1e-12), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%permute_2, %expand_3), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, -10000000.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_1), kwargs = {}) # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_1, 1, True), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 10000000.0), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_2), kwargs = {}) # %sort_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sort.default](args = (%add_2, 1), kwargs = {}) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0 = async_compile.triton('triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (4*r1), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp30 = tl.load(in_ptr2 + (x0 + (4*r1)), xmask, other=0.0) tmp31 = tl.load(in_ptr3 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 1e-12 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = libdevice.sqrt(tmp28) tmp32 = tmp30 == tmp31 tmp33 = tmp32.to(tl.float32) tmp34 = 1.0 tmp35 = 
tmp34 - tmp33 tmp36 = -10000000.0 tmp37 = tmp35 * tmp36 tmp38 = tmp29 + tmp37 tmp39 = r1 tmp40 = tmp39.to(tl.int16) tmp41 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp42 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43, tmp44, = triton_helpers.sort_with_index(tmp41, tmp42, None, 1, stable=False, descending=True) tmp45 = 10000000.0 tmp46 = tmp33 * tmp45 tmp47 = tmp29 + tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp49, tmp50, = triton_helpers.sort_with_index(tmp48, tmp42, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + (4*x0)), tmp26, xmask) tl.store(out_ptr0 + (r1 + (4*x0)), tmp43, xmask) tl.store(out_ptr1 + (r1 + (4*x0)), tmp49, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ft/cft3mvcb27x3qu2aa7eszgtcgn7kayjq33cnvtx3ey4sz5yyvzsq.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean] # Source node to ATen node mapping: # loss => add_3, clamp_min_1, full_default, mean, mul_3, sub_2 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %select), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %sub_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%mul_3, 0.3), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_1,), kwargs = {}) triton_per_fused_add_clamp_min_mean_mul_neg_sub_1 = async_compile.triton('triton_per_fused_add_clamp_min_mean_mul_neg_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_min_mean_mul_neg_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : 
tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 tmp4 = tmp3 * tmp2 tmp5 = 0.3 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 4.0 tmp13 = tmp11 / tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mul, dist_mat, clamp, dist_mat_1, eq, identity_mat, sub_1, mul_1, add_1, sort, mul_2, add_2, sort_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.sort] stream0 = get_raw_stream(0) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, buf2, buf4, 4, 4, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del buf1 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean] triton_per_fused_add_clamp_min_mean_mul_neg_sub_1.run(buf7, buf4, buf2, 1, 4, grid=grid(1), stream=stream0) del buf2 del buf4 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
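The two kernels above fuse the whole distance-plus-mining pipeline: squared norms, the -2*mm term, clamp, sqrt, and two masked sorts in the first pass, then the margin-ranking reduction in the second. Read in plain PyTorch, the math is roughly the sketch below; this is my interpretation of the generated code, and the labels.t() step assumes the square (4, 4) harness inputs:

import torch

def fused_pipeline_reference(f, xbm_f, labels, xbm_labels, margin=0.3):
    # Kernel 0: ||f_i||^2 + ||g_j||^2 - 2 f_i.g_j, clamped and rooted, then a
    # descending sort masked to positives and an ascending sort masked to
    # negatives (the +/- 1e7 terms push the wrong class out of column 0).
    sq = (f ** 2).sum(1, keepdim=True) + (xbm_f ** 2).sum(1)
    dist = (sq - 2.0 * (f @ xbm_f.t())).clamp(min=1e-12).sqrt()
    same = labels.t().eq(xbm_labels).float()
    dist_ap = torch.sort(dist - 1e7 * (1 - same), dim=1, descending=True)[0][:, 0]
    dist_an = torch.sort(dist + 1e7 * same, dim=1, descending=False)[0][:, 0]
    # Kernel 1: mean(clamp(margin - (dist_an - dist_ap), min=0)).
    return (margin - (dist_an - dist_ap)).clamp(min=0).mean()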
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):
    """Select hard positives and hard negatives according to `In defense of the
    Triplet Loss for Person Re-Identification (ICCV 2017)
    <https://arxiv.org/pdf/1703.07737v2.pdf>`_

    Args:
        dist_mat (tensor): pairwise distance matrix between two sets of features
        identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images
            :math:`P[i]` of set :math:`P` and :math:`Q[j]` of set :math:`Q` come
            from the same person, then :math:`identity\\_mat[i, j] = 1`,
            otherwise :math:`identity\\_mat[i, j] = 0`
        return_idxes (bool, optional): if True, also return indexes of hard
            examples. Default: False
    """
    sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + -10000000.0 * (1 -
        identity_mat), dim=1, descending=True)
    dist_ap = sorted_dist_mat[:, 0]
    hard_positive_idxes = sorted_idxes[:, 0]
    sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 10000000.0 *
        identity_mat, dim=1, descending=False)
    dist_an = sorted_dist_mat[:, 0]
    hard_negative_idxes = sorted_idxes[:, 0]
    if return_idxes:
        return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes
    return dist_ap, dist_an


def pairwise_euclidean_distance(x, y):
    """Compute pairwise euclidean distance between two sets of features"""
    m, n = x.size(0), y.size(0)
    dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + torch.pow(
        y, 2).sum(1, keepdim=True).expand(n, m).t() - 2 * torch.matmul(x,
        y.t())
    dist_mat = dist_mat.clamp(min=1e-12).sqrt()
    return dist_mat


class TripletLossXBM(nn.Module):
    """Triplet loss augmented with batch hard from `In defense of the Triplet
    Loss for Person Re-Identification (ICCV 2017)
    <https://arxiv.org/pdf/1703.07737v2.pdf>`_. The only difference from
    triplet loss lies in that both features from the current mini batch and
    from external storage (XBM) are involved.

    Args:
        margin (float, optional): margin of triplet loss. Default: 0.3
        normalize_feature (bool, optional): if True, normalize features into
            unit norm first before computing loss. Default: False

    Inputs:
        - f (tensor): features of current mini batch, :math:`f`
        - labels (tensor): identity labels for current mini batch, :math:`labels`
        - xbm_f (tensor): features collected from XBM, :math:`xbm\\_f`
        - xbm_labels (tensor): corresponding identity labels of xbm_f, :math:`xbm\\_labels`

    Shape:
        - f: :math:`(minibatch, F)`, where :math:`F` is the feature dimension
        - labels: :math:`(minibatch, )`
        - xbm_f: :math:`(minibatch, F)`
        - xbm_labels: :math:`(minibatch, )`
    """

    def __init__(self, margin=0.3, normalize_feature=False):
        super(TripletLossXBM, self).__init__()
        self.margin = margin
        self.normalize_feature = normalize_feature
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, f, labels, xbm_f, xbm_labels):
        if self.normalize_feature:
            f = F.normalize(f)
            xbm_f = F.normalize(xbm_f)
        dist_mat = pairwise_euclidean_distance(f, xbm_f)
        n, m = f.size(0), xbm_f.size(0)
        identity_mat = labels.expand(m, n).t().eq(xbm_labels.expand(n, m)
            ).float()
        dist_ap, dist_an = hard_examples_mining(dist_mat, identity_mat)
        y = torch.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return loss


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
        torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
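A hypothetical usage sketch for the eager module above; the feature size, batch size, and label values are illustrative assumptions, and shapes follow the docstring ((minibatch, F) features, (minibatch,) labels):

import torch

criterion = TripletLossXBM(margin=0.3)
f = torch.randn(8, 64, requires_grad=True)   # mini-batch features
labels = torch.randint(0, 4, (8,))           # mini-batch identities
xbm_f = torch.randn(8, 64)                   # features drawn from the XBM queue
xbm_labels = torch.randint(0, 4, (8,))       # their identities
loss = criterion(f, labels, xbm_f, xbm_labels)
loss.backward()  # gradients flow into f only; the XBM bank stays detached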
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0(in_out_ptr0 , in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp30 = tl.load(in_ptr2 + (x0 + 4 * r1), xmask, other=0.0) tmp31 = tl.load(in_ptr3 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 1e-12 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = libdevice.sqrt(tmp28) tmp32 = tmp30 == tmp31 tmp33 = tmp32.to(tl.float32) tmp34 = 1.0 tmp35 = tmp34 - tmp33 tmp36 = -10000000.0 tmp37 = tmp35 * tmp36 tmp38 = tmp29 + tmp37 tmp39 = r1 tmp40 = tmp39.to(tl.int16) tmp41 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp42 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43, _tmp44 = triton_helpers.sort_with_index(tmp41, tmp42, None, 1, stable=False, descending=True) tmp45 = 10000000.0 tmp46 = tmp33 * tmp45 tmp47 = tmp29 + tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp49, _tmp50 = triton_helpers.sort_with_index(tmp48, tmp42, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp26, xmask) tl.store(out_ptr0 + (r1 + 4 * x0), tmp43, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp49, xmask) @triton.jit def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 
tmp4 = tmp3 * tmp2 tmp5 = 0.3 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 4.0 tmp13 = tmp11 / tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0[grid(4) ](buf1, arg0_1, arg1_1, arg2_1, arg3_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del buf1 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6 del buf6 triton_per_fused_add_clamp_min_mean_mul_neg_sub_1[grid(1)](buf7, buf4, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf4 return buf7, def hard_examples_mining(dist_mat, identity_mat, return_idxes=False): """Select hard positives and hard negatives according to `In defense of the Triplet Loss for Person Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_ Args: dist_mat (tensor): pairwise distance matrix between two sets of features identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images :math:`P[i]` of set :math:`P` and :math:`Q[j]` of set :math:`Q` come from the same person, then :math:`identity\\_mat[i, j] = 1`, otherwise :math:`identity\\_mat[i, j] = 0` return_idxes (bool, optional): if True, also return indexes of hard examples. Default: False """ sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + -10000000.0 * (1 - identity_mat), dim=1, descending=True) dist_ap = sorted_dist_mat[:, 0] hard_positive_idxes = sorted_idxes[:, 0] sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 10000000.0 * identity_mat, dim=1, descending=False) dist_an = sorted_dist_mat[:, 0] hard_negative_idxes = sorted_idxes[:, 0] if return_idxes: return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes return dist_ap, dist_an def pairwise_euclidean_distance(x, y): """Compute pairwise euclidean distance between two sets of features""" m, n = x.size(0), y.size(0) dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() - 2 * torch.matmul(x, y.t()) dist_mat = dist_mat.clamp(min=1e-12).sqrt() return dist_mat class TripletLossXBMNew(nn.Module): """Triplet loss augmented with batch hard from `In defense of the Triplet Loss for Person Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_. The only difference from triplet loss lies in that both features from current mini batch and external storage (XBM) are involved. Args: margin (float, optional): margin of triplet loss. Default: 0.3 normalize_feature (bool, optional): if True, normalize features into unit norm first before computing loss. 
Default: False Inputs: - f (tensor): features of current mini batch, :math:`f` - labels (tensor): identity labels for current mini batch, :math:`labels` - xbm_f (tensor): features collected from XBM, :math:`xbm\\_f` - xbm_labels (tensor): corresponding identity labels of xbm_f, :math:`xbm\\_labels` Shape: - f: :math:`(minibatch, F)`, where :math:`F` is the feature dimension - labels: :math:`(minibatch, )` - xbm_f: :math:`(minibatch, F)` - xbm_labels: :math:`(minibatch, )` """ def __init__(self, margin=0.3, normalize_feature=False): super(TripletLossXBMNew, self).__init__() self.margin = margin self.normalize_feature = normalize_feature self.ranking_loss = nn.MarginRankingLoss(margin=margin) def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
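Under the assumption that the cleaned kernels above reproduce the traced graph, the fused wrapper can be spot-checked against the eager module on the harness inputs. CUDA only, since call() pins everything to cuda:0; tolerances are assumptions:

import torch

inputs = [torch.rand(4, 4, device='cuda') for _ in range(4)]  # f, labels, xbm_f, xbm_labels
torch.testing.assert_close(TripletLossXBMNew()(*inputs),
                           TripletLossXBM()(*inputs), rtol=1e-4, atol=1e-4)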
XianyuanLiu/Transfer-Learning-Library
TripletLossXBM
false
10,147
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
TripletLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/pn/cpna4n3g6gpu2paowpo7xvvdimeicnhmebfegjcmy4wk3wodlry4.py # Topologically Sorted Source Nodes: [add, mul, dist_mat, clamp, dist_mat_1, eq, identity_mat, sub_1, mul_1, add_1, sort, mul_2, add_2, sort_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.sort] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # clamp => clamp_min # dist_mat => sub # dist_mat_1 => sqrt # eq => eq # identity_mat => convert_element_type # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # sort => sort # sort_1 => sort_1 # sub_1 => sub_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %permute), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 1e-12), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%expand_2, %permute_2), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, -10000000.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_1), kwargs = {}) # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_1, 1, True), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 10000000.0), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_2), kwargs = {}) # %sort_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sort.default](args = (%add_2, 1), kwargs = {}) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0 = async_compile.triton('triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp30 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (x0 + (4*r1)), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 1e-12 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = libdevice.sqrt(tmp28) tmp32 = tmp30 == tmp31 tmp33 = tmp32.to(tl.float32) tmp34 = 1.0 tmp35 = tmp34 - tmp33 tmp36 = -10000000.0 tmp37 = tmp35 * 
tmp36 tmp38 = tmp29 + tmp37 tmp39 = r1 tmp40 = tmp39.to(tl.int16) tmp41 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp42 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43, tmp44, = triton_helpers.sort_with_index(tmp41, tmp42, None, 1, stable=False, descending=True) tmp45 = 10000000.0 tmp46 = tmp33 * tmp45 tmp47 = tmp29 + tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp49, tmp50, = triton_helpers.sort_with_index(tmp48, tmp42, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + (4*x0)), tmp26, xmask) tl.store(out_ptr0 + (r1 + (4*x0)), tmp43, xmask) tl.store(out_ptr1 + (r1 + (4*x0)), tmp49, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/e3/ce32pujiahet4pqxynqfc4b7tbaczqldo5uzescozpvluutc4mbw.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean] # Source node to ATen node mapping: # loss => add_3, clamp_min_1, full_default, mean, mul_3, sub_2 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %select), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %sub_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%mul_3, 4.0), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_1,), kwargs = {}) triton_per_fused_add_clamp_min_mean_mul_neg_sub_1 = async_compile.triton('triton_per_fused_add_clamp_min_mean_mul_neg_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_min_mean_mul_neg_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: 
tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 tmp4 = tmp3 * tmp2 tmp5 = 4.0 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = tmp11 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mul, dist_mat, clamp, dist_mat_1, eq, identity_mat, sub_1, mul_1, add_1, sort, mul_2, add_2, sort_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.sort] stream0 = get_raw_stream(0) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0.run(buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 del buf1 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean] triton_per_fused_add_clamp_min_mean_mul_neg_sub_1.run(buf7, buf4, buf2, 1, 4, grid=grid(1), stream=stream0) del buf2 del buf4 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
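One detail worth flagging: the margin from get_init_inputs() (margin=4) is constant-folded into the reduction kernel above as the literal 4.0 (tmp5, and the 4.0 in the add_3 graph fragment), so this compiled artifact is specialized to that margin and a different margin would require retracing. In eager terms the folded reduction is roughly (names of my own choosing):

# Per row r the kernel computes max(0, -(dist_an[r] - dist_ap[r]) + 4.0),
# with the margin baked in, then averages over the 4 rows.
def folded_loss(dist_an, dist_ap, margin=4.0):
    return (margin - (dist_an - dist_ap)).clamp(min=0).mean()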
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):
    """Select hard positives and hard negatives according to `In defense of the
    Triplet Loss for Person Re-Identification (ICCV 2017)
    <https://arxiv.org/pdf/1703.07737v2.pdf>`_

    Args:
        dist_mat (tensor): pairwise distance matrix between two sets of features
        identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images
            :math:`P[i]` of set :math:`P` and :math:`Q[j]` of set :math:`Q` come
            from the same person, then :math:`identity\\_mat[i, j] = 1`,
            otherwise :math:`identity\\_mat[i, j] = 0`
        return_idxes (bool, optional): if True, also return indexes of hard
            examples. Default: False
    """
    sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + -10000000.0 * (1 -
        identity_mat), dim=1, descending=True)
    dist_ap = sorted_dist_mat[:, 0]
    hard_positive_idxes = sorted_idxes[:, 0]
    sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 10000000.0 *
        identity_mat, dim=1, descending=False)
    dist_an = sorted_dist_mat[:, 0]
    hard_negative_idxes = sorted_idxes[:, 0]
    if return_idxes:
        return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes
    return dist_ap, dist_an


def pairwise_euclidean_distance(x, y):
    """Compute pairwise euclidean distance between two sets of features"""
    m, n = x.size(0), y.size(0)
    dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + torch.pow(
        y, 2).sum(1, keepdim=True).expand(n, m).t() - 2 * torch.matmul(x,
        y.t())
    dist_mat = dist_mat.clamp(min=1e-12).sqrt()
    return dist_mat


class TripletLoss(nn.Module):
    """Triplet loss augmented with batch hard from `In defense of the Triplet
    Loss for Person Re-Identification (ICCV 2017)
    <https://arxiv.org/pdf/1703.07737v2.pdf>`_.

    Args:
        margin (float): margin of triplet loss
        normalize_feature (bool, optional): if True, normalize features into
            unit norm first before computing loss. Default: False.
    """

    def __init__(self, margin, normalize_feature=False):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.normalize_feature = normalize_feature
        self.margin_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, f, labels):
        if self.normalize_feature:
            f = F.normalize(f)
        dist_mat = pairwise_euclidean_distance(f, f)
        n = dist_mat.size(0)
        identity_mat = labels.expand(n, n).eq(labels.expand(n, n).t()).float()
        dist_ap, dist_an = hard_examples_mining(dist_mat, identity_mat)
        y = torch.ones_like(dist_ap)
        loss = self.margin_loss(dist_an, dist_ap, y)
        return loss


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'margin': 4}]
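pairwise_euclidean_distance above relies on the identity ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 x_i.y_j. An illustrative check, not from the source, compares it against torch.cdist, which computes the same metric directly:

# The expand/matmul formulation matches torch.cdist up to the 1e-12 clamp,
# which only matters for near-zero distances.
import torch

x, y = torch.randn(5, 16), torch.randn(7, 16)
torch.testing.assert_close(pairwise_euclidean_distance(x, y),
                           torch.cdist(x, y), rtol=1e-4, atol=1e-4)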
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0(in_out_ptr0 , in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp30 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 1e-12 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = libdevice.sqrt(tmp28) tmp32 = tmp30 == tmp31 tmp33 = tmp32.to(tl.float32) tmp34 = 1.0 tmp35 = tmp34 - tmp33 tmp36 = -10000000.0 tmp37 = tmp35 * tmp36 tmp38 = tmp29 + tmp37 tmp39 = r1 tmp40 = tmp39.to(tl.int16) tmp41 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp42 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43, _tmp44 = triton_helpers.sort_with_index(tmp41, tmp42, None, 1, stable=False, descending=True) tmp45 = 10000000.0 tmp46 = tmp33 * tmp45 tmp47 = tmp29 + tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp49, _tmp50 = triton_helpers.sort_with_index(tmp48, tmp42, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp26, xmask) tl.store(out_ptr0 + (r1 + 4 * x0), tmp43, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp49, xmask) @triton.jit def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 tmp4 = tmp3 * tmp2 
tmp5 = 4.0 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = tmp11 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0[grid(4) ](buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6 del buf6 triton_per_fused_add_clamp_min_mean_mul_neg_sub_1[grid(1)](buf7, buf4, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf4 return buf7, def hard_examples_mining(dist_mat, identity_mat, return_idxes=False): """Select hard positives and hard negatives according to `In defense of the Triplet Loss for Person Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_ Args: dist_mat (tensor): pairwise distance matrix between two sets of features identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images :math:`P[i]` of set :math:`P` and :math:`Q[j]` of set :math:`Q` come from the same person, then :math:`identity\\_mat[i, j] = 1`, otherwise :math:`identity\\_mat[i, j] = 0` return_idxes (bool, optional): if True, also return indexes of hard examples. Default: False """ sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + -10000000.0 * (1 - identity_mat), dim=1, descending=True) dist_ap = sorted_dist_mat[:, 0] hard_positive_idxes = sorted_idxes[:, 0] sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 10000000.0 * identity_mat, dim=1, descending=False) dist_an = sorted_dist_mat[:, 0] hard_negative_idxes = sorted_idxes[:, 0] if return_idxes: return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes return dist_ap, dist_an def pairwise_euclidean_distance(x, y): """Compute pairwise euclidean distance between two sets of features""" m, n = x.size(0), y.size(0) dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() - 2 * torch.matmul(x, y.t()) dist_mat = dist_mat.clamp(min=1e-12).sqrt() return dist_mat class TripletLossNew(nn.Module): """Triplet loss augmented with batch hard from `In defense of the Triplet Loss for Person Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_. Args: margin (float): margin of triplet loss normalize_feature (bool, optional): if True, normalize features into unit norm first before computing loss. Default: False. """ def __init__(self, margin, normalize_feature=False): super(TripletLossNew, self).__init__() self.margin = margin self.normalize_feature = normalize_feature self.margin_loss = nn.MarginRankingLoss(margin=margin) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
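As with the XBM variant, a hedged way to validate the fused path is to compare it with the eager module on the harness inputs (CUDA required by call(); margin=4 to match the traced constant):

import torch

f = torch.rand(4, 4, device='cuda')
labels = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(TripletLossNew(margin=4)(f, labels),
                           TripletLoss(margin=4)(f, labels),
                           rtol=1e-4, atol=1e-4)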
XianyuanLiu/Transfer-Learning-Library
TripletLoss
false
10,148
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
PytorchMultiClass
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # 
%sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 32), (32, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 2048, grid=grid(2048), stream=stream0) 
del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
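The two _softmax kernels above split softmax along dim=1 into the standard numerically stable pair of passes: subtract the row max before exponentiating, then normalize by the sum. A minimal eager-mode sketch of the same decomposition (illustration only, not the generated kernel):

import torch

x = torch.randn(4, 4, 4, 4)
amax = x.amax(dim=1, keepdim=True)      # pass 1: triton_poi_fused__softmax_1
e = (x - amax).exp()
out = e / e.sum(dim=1, keepdim=True)    # pass 2: triton_poi_fused__softmax_2
torch.testing.assert_close(out, torch.softmax(x, dim=1))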
import torch
import torch.nn as nn
import torch.nn.functional as F


class PytorchMultiClass(nn.Module):
    """Multi-class classifier; takes num_features as its input size.

    Attributes:
        layer_1: fully-connected layer with 32 neurons
        layer_out: fully-connected layer with 4 neurons
        softmax: softmax over the class dimension (dim=1)

    Methods:
        forward(x): applies layer_1 with ReLU and dropout, then layer_out
        followed by softmax.
    """

    def __init__(self, num_features):
        super(PytorchMultiClass, self).__init__()
        self.layer_1 = nn.Linear(num_features, 32)
        self.layer_out = nn.Linear(32, 4)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = F.dropout(F.relu(self.layer_1(x)), training=self.training)
        x = self.layer_out(x)
        return self.softmax(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
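A minimal cross-check sketch, assuming a CUDA device and a PyTorch build with torch.compile; eval() makes the dropout above a no-op, so the eager module and its Inductor-compiled counterpart can be compared deterministically:

import torch

model = PytorchMultiClass(num_features=4).cuda().eval()
x = torch.rand(4, 4, 4, 4, device='cuda')
compiled = torch.compile(model)  # lowers to kernels like the ones above
with torch.no_grad():
    torch.testing.assert_close(compiled(x), model(x), rtol=1e-4, atol=1e-4)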
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 32), (32, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf5, 2048, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0 ), buf4, primals_4, buf5 class PytorchMultiClassNew(nn.Module): """num_features as input parameter attributes: layer_1: fully-connected layer with 32 neurons layer_out: fully-connected layer with 4 neurons softmax: softmax function methods: forward() with inputs as input parameter, perform ReLU and DropOut on the fully-connected layer followed by the output layer with softmax""" def __init__(self, num_features): super(PytorchMultiClassNew, self).__init__() self.layer_1 = nn.Linear(num_features, 32) self.layer_out = nn.Linear(32, 4) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.layer_1.weight primals_2 = self.layer_1.bias primals_4 = self.layer_out.weight primals_5 = self.layer_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
freescania/advdsi_at2
PytorchMultiClass
false
10,149
[ "MIT" ]
0
13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
https://github.com/freescania/advdsi_at2/tree/13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
PytorchBinary
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) 
x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7z/c7zsuucunqdovb2xa6tywxjxwmolzjzdk72ratro7fi3qvgyqb7c.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # x_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 256), (256, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted 
Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [relu, x], Original ATen: [aten.relu, aten.native_dropout] buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True) del buf1 buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 1), (1, 256), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf6, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
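Unlike the multi-class graph, this one keeps an explicit native_dropout call because F.dropout is used with its default training=True. A small sketch of what that op returns, the scaled output plus the boolean keep-mask (buf4 above) that the backward pass reuses:

import torch

x = torch.randn(4, 4, 4, 256, device='cuda')
out, mask = torch.ops.aten.native_dropout.default(x, 0.5, True)
assert mask.dtype == torch.bool and out.shape == x.shape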
import torch
import torch.nn as nn
import torch.nn.functional as F


class PytorchBinary(nn.Module):

    def __init__(self, num_features):
        super(PytorchBinary, self).__init__()
        self.layer_1 = nn.Linear(num_features, 256)
        self.layer_out = nn.Linear(256, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = F.dropout(F.relu(self.layer_1(x)))
        x = self.sigmoid(self.layer_out(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
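Note that F.dropout defaults to training=True, so PytorchBinary applies dropout even under model.eval(); that is why the captured graph above calls native_dropout unconditionally. A quick demonstration of the default:

import torch
import torch.nn.functional as F

y = F.dropout(torch.ones(1000))  # p=0.5, training=True by default
assert (y == 0).any() and y.max() == 2.0  # dropped to zero, kept scaled by 1/(1-p)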
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 256), (256, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True) del buf1 buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 1), (1, 256), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf5 triton_poi_fused_sigmoid_1[grid(64)](buf6, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), buf6, primals_4, buf7 class PytorchBinaryNew(nn.Module): def __init__(self, num_features): super(PytorchBinaryNew, self).__init__() self.layer_1 = nn.Linear(num_features, 256) self.layer_out = nn.Linear(256, 1) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.layer_1.weight primals_2 = self.layer_1.bias primals_4 = self.layer_out.weight primals_5 = self.layer_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
freescania/advdsi_at2
PytorchBinary
false
10,150
[ "MIT" ]
0
13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
https://github.com/freescania/advdsi_at2/tree/13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
PytorchRegression
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 128), (128, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 8192, grid=grid(8192), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [relu, x], Original ATen: [aten.relu, aten.native_dropout] buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True) del buf1 buf3 = buf2[0] buf4 = buf2[1] del buf2 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_5 return (reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
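The reinterpret_tensor calls threaded through these wrappers are zero-copy reinterpretations of an existing allocation under new sizes and strides; the eager analogue is as_strided on the same storage. A sketch using the shapes from buf0/buf1 above:

import torch

buf = torch.empty(64, 128)
view = buf.as_strided((4, 4, 4, 128), (2048, 512, 128, 1))
assert view.data_ptr() == buf.data_ptr()  # same storage, new shape/strides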
import torch
import torch.nn as nn
import torch.nn.functional as F


class PytorchRegression(nn.Module):

    def __init__(self, num_features):
        super(PytorchRegression, self).__init__()
        self.layer_1 = nn.Linear(num_features, 128)
        self.layer_out = nn.Linear(128, 1)

    def forward(self, x):
        x = F.dropout(F.relu(self.layer_1(x)))
        x = self.layer_out(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
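nn.Linear on a 4-D input acts on the last dimension only, which is why the wrapper above flattens the (4, 4, 4, 4) input to a (64, 4) matrix for a single extern mm. Equivalence sketch:

import torch

lin = torch.nn.Linear(4, 128)
x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(lin(x), lin(x.reshape(64, 4)).reshape(4, 4, 4, 128))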
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 128), (128, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf7, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True) del buf1 buf3 = buf2[0] buf4 = buf2[1] del buf2 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_5 return reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), primals_4, buf7 class PytorchRegressionNew(nn.Module): def __init__(self, num_features): super(PytorchRegressionNew, self).__init__() self.layer_1 = nn.Linear(num_features, 128) self.layer_out = nn.Linear(128, 1) def forward(self, input_0): primals_1 = self.layer_1.weight primals_2 = self.layer_1.bias primals_4 = self.layer_out.weight primals_5 = self.layer_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
freescania/advdsi_at2
PytorchRegression
false
10,151
[ "MIT" ]
0
13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
https://github.com/freescania/advdsi_at2/tree/13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
AvgPoolHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), 
xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
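The fused kernel above averages each full 4x4 feature map: sixteen loads summed, then scaled by 0.0625 (= 1/16). That matches eager avg_pool2d when the window covers the whole map:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(F.avg_pool2d(x, kernel_size=4, stride=1),
                           x.sum(dim=(-2, -1), keepdim=True) / 16.0)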
import torch
import torch.nn as nn
import torch.optim


class AvgPoolHead(nn.Module):

    def __init__(self, in_channels, out_channels, fea_map_size):
        super(AvgPoolHead, self).__init__()
        self.avgpool = nn.AvgPool2d(fea_map_size, stride=1)
        self.fc = nn.Linear(in_channels, out_channels)

    def forward(self, x):
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'fea_map_size': 4}]
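Usage sketch with the shapes from get_init_inputs above (assumes the class is in scope); the pooled (4, 4, 1, 1) map flattens to (4, 4) before the linear head:

import torch

head = AvgPoolHead(in_channels=4, out_channels=4, fea_map_size=4)
logits = head(torch.rand(4, 4, 4, 4))
assert logits.shape == (4, 4)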
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf1) del primals_2 del primals_3 return buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0) class AvgPoolHeadNew(nn.Module): def __init__(self, in_channels, out_channels, fea_map_size): super(AvgPoolHeadNew, self).__init__() self.avgpool = nn.AvgPool2d(fea_map_size, stride=1) self.fc = nn.Linear(in_channels, out_channels) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
harshitbansal05/integral-human-pose
AvgPoolHead
false
10,152
[ "MIT" ]
0
50c32b59d765afe3ab2c3873068d3adfb8fd9b13
https://github.com/harshitbansal05/integral-human-pose/tree/50c32b59d765afe3ab2c3873068d3adfb8fd9b13
KarankEtAl
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/pf/cpf3tohdpwovztgbrcmqotqxtuesimszab37pwyalhgjaydjzxta.py # Topologically Sorted Source Nodes: [conv3d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 11808768 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 246016) % 12 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/rk/crkdj4hoxy7y5bjtjpest45dzl5u53lzu367l2ref22lhlnqg2rr.py # Topologically Sorted Source Nodes: [conv3d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv3d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 33177600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x5 = xindex x2 = (xindex // 230400) % 36 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x5), None) tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, None) tl.store(out_ptr1 + (x0 + (3712*x4)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7z/c7zwos2zpmm6oq3ip6ft3mzyjbm3huriciqbgc32bdqtziin5a3p.py # Topologically 
Sorted Source Nodes: [conv3d_1, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.view] # Source node to ATen node mapping: # conv3d_1 => convolution_1 # x_1 => relu_1 # x_2 => view # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [-1, 144]), kwargs = {}) triton_poi_fused_convolution_relu_view_2 = async_compile.triton('triton_poi_fused_convolution_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 33177600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + ((3616*(x0 // 3600)) + (x0 % 3600)), None) tl.store(out_ptr0 + (x0), tmp0, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (12, 1, 1, 3, 3), (9, 9, 9, 3, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (36, 12, 1, 3, 3), (108, 9, 9, 3, 1)) assert_size_stride(primals_5, (36, ), (1, )) assert_size_stride(primals_6, (24, 144), (144, 1)) assert_size_stride(primals_7, (24, ), (1, )) assert_size_stride(primals_8, (4, 24), (24, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) 
assert_size_stride(buf0, (4, 12, 64, 62, 62), (2952192, 246016, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv3d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 11808768, grid=grid(11808768), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 36, 64, 60, 60), (8294400, 230400, 3600, 60, 1)) buf3 = empty_strided_cuda((4, 36, 64, 60, 60), (8331264, 231424, 3616, 60, 1), torch.float32) buf7 = empty_strided_cuda((4, 36, 64, 60, 60), (8552448, 237568, 3712, 60, 1), torch.bool) # Topologically Sorted Source Nodes: [conv3d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf7, 33177600, grid=grid(33177600), stream=stream0) del primals_5 buf4 = reinterpret_tensor(buf2, (230400, 144), (144, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [conv3d_1, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.view] triton_poi_fused_convolution_relu_view_2.run(buf3, buf4, 33177600, grid=grid(33177600), stream=stream0) del buf3 buf5 = empty_strided_cuda((230400, 24), (24, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (144, 24), (1, 144), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((230400, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (24, 4), (1, 24), 0), alpha=1, beta=1, out=buf6) del primals_9 return (buf6, primals_1, primals_3, primals_4, buf1, buf4, buf5, primals_8, primals_6, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 1, 1, 3, 3), (9, 9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((36, 12, 1, 3, 3), (108, 9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((36, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((24, 144), (144, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 24), (24, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class KarankEtAl(nn.Module): def __init__(self, input_channels, n_classes, patch_size=5): super(KarankEtAl, self).__init__() self.patch_size = patch_size self.input_channels = input_channels self.n_classes = n_classes self.conv1 = nn.Conv3d(1, 3 * self.input_channels, (1, 3, 3)) self.conv2 = nn.Conv3d(3 * self.input_channels, 9 * self. input_channels, (1, 3, 3)) self.features_size = self._get_final_flattened_size() self.fc1 = nn.Linear(self.features_size, 6 * self.input_channels) self.fc2 = nn.Linear(6 * self.input_channels, self.n_classes) def _get_final_flattened_size(self): with torch.no_grad(): x = torch.zeros((1, 1, self.input_channels, self.patch_size, self.patch_size)) x = self.conv1(x) x = self.conv2(x) _, t, c, w, h = x.size() return t * c * w * h def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = x.view(-1, self.features_size) x = self.fc1(x) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64, 64])] def get_init_inputs(): return [[], {'input_channels': 4, 'n_classes': 4}]
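For orientation, a minimal smoke-test sketch for the eager reference module above (illustrative only, not part of the dataset record); it uses the shapes from get_inputs() and the arguments from get_init_inputs(), and assumes a CUDA device:

import torch

# features_size is derived from patch_size=5: conv1/conv2 with (1, 3, 3)
# kernels reduce a 5x5 patch to 1x1 over 36 channels at depth 4, giving
# 36 * 4 * 1 * 1 = 144. A 64x64x64 input is therefore flattened into many
# 144-wide rows rather than one row per batch element.
model = KarankEtAl(input_channels=4, n_classes=4).cuda()
x = torch.rand(4, 1, 64, 64, 64, device='cuda')
logits = model(x)
print(logits.shape)  # torch.Size([230400, 4]) -- matches buf6 in the compiled call()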
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 246016 % 12 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x5 = xindex x2 = xindex // 230400 % 36 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x5, None) tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, None) tl.store(out_ptr1 + (x0 + 3712 * x4), tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (3616 * (x0 // 3600) + x0 % 3600), None) tl.store(out_ptr0 + x0, tmp0, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (12, 1, 1, 3, 3), (9, 9, 9, 3, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (36, 12, 1, 3, 3), (108, 9, 9, 3, 1)) assert_size_stride(primals_5, (36,), (1,)) assert_size_stride(primals_6, (24, 144), (144, 1)) assert_size_stride(primals_7, (24,), (1,)) assert_size_stride(primals_8, (4, 24), (24, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 12, 64, 62, 62), (2952192, 246016, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(11808768)](buf1, primals_2, 11808768, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 36, 64, 60, 60), (8294400, 230400, 3600, 60, 1)) buf3 = empty_strided_cuda((4, 36, 64, 60, 60), (8331264, 231424, 3616, 60, 1), torch.float32) buf7 = empty_strided_cuda((4, 36, 64, 60, 60), (8552448, 237568, 3712, 60, 1), torch.bool) 
triton_poi_fused_convolution_relu_threshold_backward_1[grid(33177600)]( buf2, primals_5, buf3, buf7, 33177600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = reinterpret_tensor(buf2, (230400, 144), (144, 1), 0) del buf2 triton_poi_fused_convolution_relu_view_2[grid(33177600)](buf3, buf4, 33177600, XBLOCK=1024, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((230400, 24), (24, 1), torch.float32) extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (144, 24), (1, 144), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((230400, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (24, 4), (1, 24), 0), alpha=1, beta=1, out=buf6) del primals_9 return (buf6, primals_1, primals_3, primals_4, buf1, buf4, buf5, primals_8, primals_6, buf7) class KarankEtAlNew(nn.Module): def __init__(self, input_channels, n_classes, patch_size=5): super(KarankEtAlNew, self).__init__() self.patch_size = patch_size self.input_channels = input_channels self.n_classes = n_classes self.conv1 = nn.Conv3d(1, 3 * self.input_channels, (1, 3, 3)) self.conv2 = nn.Conv3d(3 * self.input_channels, 9 * self. input_channels, (1, 3, 3)) self.features_size = self._get_final_flattened_size() self.fc1 = nn.Linear(self.features_size, 6 * self.input_channels) self.fc2 = nn.Linear(6 * self.input_channels, self.n_classes) def _get_final_flattened_size(self): with torch.no_grad(): x = torch.zeros((1, 1, self.input_channels, self.patch_size, self.patch_size)) x = self.conv1(x) x = self.conv2(x) _, t, c, w, h = x.size() return t * c * w * h def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
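A hedged sketch of how the Triton-backed wrapper could be checked against the eager reference (both classes are defined above; the seed and tolerances are assumptions for illustration):

import torch

# Both modules register the same conv1/conv2/fc1/fc2 submodules, so the
# eager weights can be copied straight into the compiled wrapper.
torch.manual_seed(0)
ref = KarankEtAl(input_channels=4, n_classes=4).cuda()
fast = KarankEtAlNew(input_channels=4, n_classes=4).cuda()
fast.load_state_dict(ref.state_dict())
x = torch.rand(4, 1, 64, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), fast(x), rtol=1e-4, atol=1e-4)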
giorgosouz/HSI-classification-makantasis-cnn
KarankEtAl
false
10,153
[ "MIT" ]
0
95f18274d7cb67babb971db71f358a73dee2affc
https://github.com/giorgosouz/HSI-classification-makantasis-cnn/tree/95f18274d7cb67babb971db71f358a73dee2affc
FactorTransfer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/t5/ct56alqgoynhiombzpq5fm5wf3zh32egabawyhxhvug4d66ecouu.py # Topologically Sorted Source Nodes: [normalize, normalize_1, sub], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub] # Source node to ATen node mapping: # normalize => div, pow_2, sum_1 # normalize_1 => div_1, pow_5, sum_2 # sub => sub # Graph fragment: # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %expand), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %expand_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) triton_per_fused_div_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 
'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp5 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp8 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp28 / tmp11 tmp30 = tmp29 * tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.where(xmask, tmp31, 0) tmp34 = tl.sum(tmp33, 1)[:, None] tmp35 = libdevice.sqrt(tmp17) tmp36 = 1e-12 tmp37 = triton_helpers.maximum(tmp35, tmp36) tmp38 = tmp12 / tmp37 tmp39 = libdevice.sqrt(tmp34) tmp40 = triton_helpers.maximum(tmp39, tmp36) tmp41 = tmp29 / tmp40 tmp42 = tmp38 - tmp41 tl.store(out_ptr2 + (r1 + (16*x0)), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gl/cglmfxdo76j2ab7llzszfnvwdkitvivbxyr5x7kknis3tn2lhrjb.py # Topologically Sorted Source Nodes: [abs_1, mean_2], Original ATen: [aten.abs, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # mean_2 => mean_2 # Graph fragment: # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_mean_1 = async_compile.triton('triton_per_fused_abs_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize, normalize_1, sub], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub] stream0 = get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_sub_0.run(arg0_1, arg1_1, buf2, 4, 16, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [abs_1, mean_2], Original ATen: [aten.abs, aten.mean] triton_per_fused_abs_mean_1.run(buf4, buf2, 1, 64, grid=grid(1), stream=stream0) del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class FactorTransfer(nn.Module): """Paraphrasing Complex Network: Network Compression via Factor Transfer, NeurIPS 2018""" def __init__(self, p1=2, p2=1): super(FactorTransfer, self).__init__() self.p1 = p1 self.p2 = p2 def forward(self, f_s, f_t): return self.factor_loss(f_s, f_t) def factor_loss(self, f_s, f_t): s_H, t_H = f_s.shape[2], f_t.shape[2] if s_H > t_H: f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H)) elif s_H < t_H: f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H)) else: pass if self.p2 == 1: return (self.factor(f_s) - self.factor(f_t)).abs().mean() else: return (self.factor(f_s) - self.factor(f_t)).pow(self.p2).mean() def factor(self, f): return F.normalize(f.pow(self.p1).mean(1).view(f.size(0), -1)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
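A small usage sketch for the loss above (illustrative only; shapes follow get_inputs()):

import torch

# With the defaults p1=2, p2=1, the loss is the mean absolute difference
# between the L2-normalized "factors": per-sample maps of squared
# activations averaged over the channel dimension.
criterion = FactorTransfer()
f_s = torch.rand(4, 4, 4, 4)   # student features (N, C, H, W)
f_t = torch.rand(4, 4, 4, 4)   # teacher features, same spatial size here
loss = criterion(f_s, f_t)     # scalar; adaptive pooling kicks in if H differs
print(loss.item())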
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp8 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp28 / tmp11 tmp30 = tmp29 * tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.where(xmask, tmp31, 0) tmp34 = tl.sum(tmp33, 1)[:, None] tmp35 = libdevice.sqrt(tmp17) tmp36 = 1e-12 tmp37 = triton_helpers.maximum(tmp35, tmp36) tmp38 = tmp12 / tmp37 tmp39 = libdevice.sqrt(tmp34) tmp40 = triton_helpers.maximum(tmp39, tmp36) tmp41 = tmp29 / tmp40 tmp42 = tmp38 - tmp41 tl.store(out_ptr2 + (r1 + 16 * x0), tmp42, xmask) @triton.jit def triton_per_fused_abs_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_sub_0[grid(4)](arg0_1, arg1_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_abs_mean_1[grid(1)](buf4, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf4, class FactorTransferNew(nn.Module): 
"""Paraphrasing Complex Network: Network Compression via Factor Transfer, NeurIPS 2018""" def __init__(self, p1=2, p2=1): super(FactorTransferNew, self).__init__() self.p1 = p1 self.p2 = p2 def factor_loss(self, f_s, f_t): s_H, t_H = f_s.shape[2], f_t.shape[2] if s_H > t_H: f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H)) elif s_H < t_H: f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H)) else: pass if self.p2 == 1: return (self.factor(f_s) - self.factor(f_t)).abs().mean() else: return (self.factor(f_s) - self.factor(f_t)).pow(self.p2).mean() def factor(self, f): return F.normalize(f.pow(self.p1).mean(1).view(f.size(0), -1)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bobo0810/RepDistiller
FactorTransfer
false
10,154
[ "BSD-2-Clause" ]
0
0a4cea2142221b9b31c8e995920273f5619b37f8
https://github.com/bobo0810/RepDistiller/tree/0a4cea2142221b9b31c8e995920273f5619b37f8
RepresentationSubspaceDistance
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5u/c5uatbyxw4saiq2z4wmhyfdulktctq3pev2zqg2dk5os3jldzji4.py # Topologically Sorted Source Nodes: [pow_1, sub, sine, rsd], Original ATen: [aten.pow, aten.rsub, aten.sqrt, aten.linalg_vector_norm] # Source node to ATen node mapping: # pow_1 => pow_1 # rsd => abs_1, sum_1 # sine => sqrt # sub => sub # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%getitem_7, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sqrt,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, None), kwargs = {}) triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = libdevice.sqrt(tmp3) tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/iz/cizdsycgzb6gtivtvt57dj4s7lzma5egfwoha5c2bauqmmwiydvl.py # Topologically Sorted Source Nodes: [rsd, abs_1, abs_2, sub_1, bmp, mul, add], Original ATen: [aten.linalg_vector_norm, aten.abs, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # abs_1 => abs_2 # abs_2 => abs_3 # add => add # bmp => pow_4, pow_5, sum_2 # mul => mul # rsd => pow_3 # sub_1 => sub_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%getitem_6,), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%permute_5,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_2, %abs_3), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, None), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %mul), kwargs = {}) triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1 = async_compile.triton('triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 
'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp9 = tl.load(in_out_ptr0 + (0)) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, 1]) tmp1 = tl_math.abs(tmp0) tmp3 = tl_math.abs(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp11 = libdevice.sqrt(tmp8) tmp12 = 0.1 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp14, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [svd], Original ATen: [aten._linalg_svd] buf0 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0)) del arg0_1 buf1 = buf0[0] del buf0 # Topologically Sorted Source Nodes: [svd_1], Original ATen: [aten._linalg_svd] buf4 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0)) del arg1_1 buf5 = buf4[0] del buf4 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf5, out=buf8) del buf1 del buf5 # Topologically Sorted Source Nodes: [svd_2], Original ATen: [aten._linalg_svd] buf9 = torch.ops.aten._linalg_svd.default(buf8) del buf8 buf10 = buf9[0] buf11 = buf9[1] buf12 = buf9[2] del buf9 buf13 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sub, sine, rsd], Original ATen: [aten.pow, aten.rsub, aten.sqrt, aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0.run(buf11, buf13, 1, 4, grid=grid(1), stream=stream0) del buf11 buf15 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [rsd, abs_1, abs_2, sub_1, bmp, mul, add], Original ATen: [aten.linalg_vector_norm, aten.abs, aten.sub, aten.mul, aten.add] triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1.run(buf15, buf10, buf12, 1, 16, grid=grid(1), stream=stream0) del buf10 del buf12 return (buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class RepresentationSubspaceDistance(nn.Module): """ `Representation Subspace Distance (ICML 2021) <http://ise.thss.tsinghua.edu.cn/~mlong/doc/Representation-Subspace-Distance-for-Domain-Adaptation-Regression-icml21.pdf>`_ Args: trade_off (float): The trade-off value between Representation Subspace Distance and Base Mismatch Penalization. Default: 0.1 Inputs: - f_s (tensor): feature representations on source domain, :math:`f^s` - f_t (tensor): feature representations on target domain, :math:`f^t` """ def __init__(self, trade_off=0.1): super(RepresentationSubspaceDistance, self).__init__() self.trade_off = trade_off def forward(self, f_s, f_t): U_s, _, _ = torch.svd(f_s.t()) U_t, _, _ = torch.svd(f_t.t()) P_s, cosine, P_t = torch.svd(torch.mm(U_s.t(), U_t)) sine = torch.sqrt(1 - torch.pow(cosine, 2)) rsd = torch.norm(sine, 1) bmp = torch.norm(torch.abs(P_s) - torch.abs(P_t), 2) return rsd + self.trade_off * bmp def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
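An illustrative sketch of the loss above used as a training criterion (assumed usage; shapes follow get_inputs()):

import torch

# torch.svd of U_s^T @ U_t yields the cosines of the principal angles
# between the two feature subspaces; rsd sums the corresponding sines,
# and bmp penalizes mismatch between the principal bases P_s and P_t.
rsd = RepresentationSubspaceDistance(trade_off=0.1)
f_s = torch.rand(4, 4, requires_grad=True)   # source features (batch, dim)
f_t = torch.rand(4, 4)                       # target features
loss = rsd(f_s, f_t)
loss.backward()                              # gradients flow through the SVDs
print(loss.item(), f_s.grad.shape)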
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = libdevice.sqrt(tmp3) tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) @triton.jit def triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp9 = tl.load(in_out_ptr0 + 0) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, 1]) tmp1 = tl_math.abs(tmp0) tmp3 = tl_math.abs(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp11 = libdevice.sqrt(tmp8) tmp12 = 0.1 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0)) del arg0_1 buf1 = buf0[0] del buf0 buf4 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0)) del arg1_1 buf5 = buf4[0] del buf4 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf5, out=buf8) del buf1 del buf5 buf9 = torch.ops.aten._linalg_svd.default(buf8) del buf8 buf10 = buf9[0] buf11 = buf9[1] buf12 = buf9[2] del buf9 buf13 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_linalg_vector_norm_pow_rsub_sqrt_0[grid(1)](buf11, buf13, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf11 buf15 = buf13 del buf13 triton_per_fused_abs_add_linalg_vector_norm_mul_sub_1[grid(1)](buf15, buf10, buf12, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf10 del buf12 return buf15, class RepresentationSubspaceDistanceNew(nn.Module): """ `Representation Subspace Distance (ICML 2021) <http://ise.thss.tsinghua.edu.cn/~mlong/doc/Representation-Subspace-Distance-for-Domain-Adaptation-Regression-icml21.pdf>`_ Args: trade_off (float): The trade-off value between Representation Subspace Distance and Base Mismatch Penalization. 
Default: 0.1 Inputs: - f_s (tensor): feature representations on source domain, :math:`f^s` - f_t (tensor): feature representations on target domain, :math:`f^t` """ def __init__(self, trade_off=0.1): super(RepresentationSubspaceDistanceNew, self).__init__() self.trade_off = trade_off def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
XianyuanLiu/Transfer-Learning-Library
RepresentationSubspaceDistance
false
10,155
[ "MIT" ]
0
25f83f32437032df88ca6101ecd1f63ec7a0aa2c
https://github.com/XianyuanLiu/Transfer-Learning-Library/tree/25f83f32437032df88ca6101ecd1f63ec7a0aa2c
Correlation
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/hl/chlb6erzxkaz5er2ywfwkvvgncd76mgltyvsn22epwb3ucdjvihq.py # Topologically Sorted Source Nodes: [mul, sum_1, loss], Original ATen: [aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # loss => mean # mul => mul # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_1, %slice_2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {}) triton_per_fused_mean_mul_sum_0 = async_compile.triton('triton_per_fused_mean_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, 
xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), rmask, other=0.0) tmp4 = tl.load(in_ptr0 + (64 + r0 + (64*r1)), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + (64 + r0 + (64*r1)), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), rmask, other=0.0) tmp10 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), rmask, other=0.0) tmp13 = tl.load(in_ptr0 + (80 + r0 + (64*r1)), rmask, other=0.0) tmp14 = tl.load(in_ptr1 + (80 + r0 + (64*r1)), rmask, other=0.0) tmp19 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), rmask, other=0.0) tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), rmask, other=0.0) tmp23 = tl.load(in_ptr0 + (96 + r0 + (64*r1)), rmask, other=0.0) tmp24 = tl.load(in_ptr1 + (96 + r0 + (64*r1)), rmask, other=0.0) tmp29 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), rmask, other=0.0) tmp30 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), rmask, other=0.0) tmp33 = tl.load(in_ptr0 + (112 + r0 + (64*r1)), rmask, other=0.0) tmp34 = tl.load(in_ptr1 + (112 + r0 + (64*r1)), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 * tmp7 tmp11 = tmp9 - tmp10 tmp12 = tl_math.abs(tmp11) tmp15 = tmp13 - tmp14 tmp16 = tl_math.abs(tmp15) tmp17 = tmp12 * tmp16 tmp18 = tmp8 + tmp17 tmp21 = tmp19 - tmp20 tmp22 = tl_math.abs(tmp21) tmp25 = tmp23 - tmp24 tmp26 = tl_math.abs(tmp25) tmp27 = tmp22 * tmp26 tmp28 = tmp18 + tmp27 tmp31 = tmp29 - tmp30 tmp32 = tl_math.abs(tmp31) tmp35 = tmp33 - tmp34 tmp36 = tl_math.abs(tmp35) tmp37 = tmp32 * tmp36 tmp38 = tmp28 + tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.where(rmask, tmp39, 0) tmp42 = tl.sum(tmp41, 1)[:, None] tmp43 = 48.0 tmp44 = tmp42 / tmp43 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp44, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, sum_1, loss], Original ATen: [aten.mul, aten.sum, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_sum_0.run(buf2, arg0_1, arg1_1, 1, 48, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Correlation(nn.Module): """Correlation Congruence for Knowledge Distillation, ICCV 2019. The authors nicely shared the code with me. I restructured their code to be compatible with my running framework. Credits go to the original author""" def __init__(self): super(Correlation, self).__init__() def forward(self, f_s, f_t): delta = torch.abs(f_s - f_t) loss = torch.mean((delta[:-1] * delta[1:]).sum(1)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
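A brief usage sketch for the loss above (illustrative; shapes follow get_inputs()):

import torch

# delta[:-1] * delta[1:] pairs each sample's error with the next sample's,
# so for a batch of 4 the final mean runs over 3 * 4 * 4 = 48 values --
# the same 48 that appears as rnumel in the fused Triton kernel above.
criterion = Correlation()
f_s = torch.rand(4, 4, 4, 4)
f_t = torch.rand(4, 4, 4, 4)
loss = criterion(f_s, f_t)
print(loss.item())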
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), rmask, other=0.0) tmp4 = tl.load(in_ptr0 + (64 + r0 + 64 * r1), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + (64 + r0 + 64 * r1), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), rmask, other=0.0) tmp10 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), rmask, other=0.0) tmp13 = tl.load(in_ptr0 + (80 + r0 + 64 * r1), rmask, other=0.0) tmp14 = tl.load(in_ptr1 + (80 + r0 + 64 * r1), rmask, other=0.0) tmp19 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), rmask, other=0.0) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), rmask, other=0.0) tmp23 = tl.load(in_ptr0 + (96 + r0 + 64 * r1), rmask, other=0.0) tmp24 = tl.load(in_ptr1 + (96 + r0 + 64 * r1), rmask, other=0.0) tmp29 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), rmask, other=0.0) tmp30 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), rmask, other=0.0) tmp33 = tl.load(in_ptr0 + (112 + r0 + 64 * r1), rmask, other=0.0) tmp34 = tl.load(in_ptr1 + (112 + r0 + 64 * r1), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 * tmp7 tmp11 = tmp9 - tmp10 tmp12 = tl_math.abs(tmp11) tmp15 = tmp13 - tmp14 tmp16 = tl_math.abs(tmp15) tmp17 = tmp12 * tmp16 tmp18 = tmp8 + tmp17 tmp21 = tmp19 - tmp20 tmp22 = tl_math.abs(tmp21) tmp25 = tmp23 - tmp24 tmp26 = tl_math.abs(tmp25) tmp27 = tmp22 * tmp26 tmp28 = tmp18 + tmp27 tmp31 = tmp29 - tmp30 tmp32 = tl_math.abs(tmp31) tmp35 = tmp33 - tmp34 tmp36 = tl_math.abs(tmp35) tmp37 = tmp32 * tmp36 tmp38 = tmp28 + tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.where(rmask, tmp39, 0) tmp42 = tl.sum(tmp41, 1)[:, None] tmp43 = 48.0 tmp44 = tmp42 / tmp43 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp44, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_mean_mul_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 48, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class CorrelationNew(nn.Module): """Correlation Congruence for Knowledge Distillation, ICCV 2019. The authors nicely shared the code with me. I restructured their code to be compatible with my running framework. Credits go to the original author""" def __init__(self): super(CorrelationNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bobo0810/RepDistiller
Correlation
false
10,156
[ "BSD-2-Clause" ]
0
0a4cea2142221b9b31c8e995920273f5619b37f8
https://github.com/bobo0810/RepDistiller/tree/0a4cea2142221b9b31c8e995920273f5619b37f8
GATMutiHeadAttLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wm/cwm4p6i4onfgzirdknbkkkkalyi643y6ynzifzggmplzobgfpkd4.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] # Source node to ATen node mapping: # h => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] # Source node to ATen node mapping: # h => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ly/clyfgwr3kgfvzdrx7xlpotc6oloj5h6navonpmk2vckseyw4usqc.py # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # f_1 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': 
False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y3/cy3wm5wimywhlsxa7hk245bnhzixbek5lhjzyfdljso5bghiwcxe.py # Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.add, aten.leaky_relu] # Source node to ATen node mapping: # e => add # e_1 => gt # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_6, %permute), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/k3/ck3xqrrmz6ytndzhozkjj3tngffffolpg5hkyfy7sazleuihua3b.py # Topologically Sorted Source 
Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => amax # e => add # e_1 => mul, where # gt => gt_1 # zero_vec => full_default # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_6, %permute), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%expand_8, 0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [2], True), kwargs = {}) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4 = async_compile.triton('triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (16*x3)), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + (4*x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x4), xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (4 + x0 + (16*x3)), xmask).to(tl.int1) tmp15 = 
tl.load(in_ptr2 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (8 + x0 + (16*x3)), xmask).to(tl.int1) tmp24 = tl.load(in_ptr2 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (12 + x0 + (16*x3)), xmask).to(tl.int1) tmp33 = tl.load(in_ptr2 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 0.2 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp12 > tmp1 tmp16 = tmp15 + tmp5 tmp17 = tmp16 * tmp7 tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.where(tmp13, tmp18, tmp10) tmp20 = triton_helpers.maximum(tmp11, tmp19) tmp22 = tmp21 > tmp1 tmp25 = tmp24 + tmp5 tmp26 = tmp25 * tmp7 tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp10) tmp29 = triton_helpers.maximum(tmp20, tmp28) tmp31 = tmp30 > tmp1 tmp34 = tmp33 + tmp5 tmp35 = tmp34 * tmp7 tmp36 = tl.where(tmp32, tmp34, tmp35) tmp37 = tl.where(tmp31, tmp36, tmp10) tmp38 = triton_helpers.maximum(tmp29, tmp37) tl.store(out_ptr0 + (x4), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yr/cyr7i5nhebw2mdrqchsbmm36famsndlulvdtcjmaluysorcvauuq.py # Topologically Sorted Source Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => exp, sub # e => add # e_1 => mul, where # gt => gt_1 # zero_vec => full_default # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_6, %permute), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%expand_8, 0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5 = async_compile.triton('triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x4 = xindex % 16 x5 = xindex x6 = (xindex // 4) x0 = xindex % 4 x7 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + (4*x7)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0 + (4*x7)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 0.2 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp11 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ry/cryfskax4soldfj4gnwnzu4snp73auwk43upvbh6hu5xizgjdsfb.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ex/cexaxrfhntxmaz67nvmgl6ywjxoqm4zb3egskp6h5sbnnge7lksf.py # Topologically Sorted Source Nodes: [node_out_2, squeeze], Original ATen: [aten.elu, aten.squeeze] # Source node to ATen node mapping: # node_out_2 => expm1, gt_2, mul_2, mul_4, where_2 # squeeze => squeeze # Graph fragment: # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_12, 0), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_2, %mul_4), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%where_2,), kwargs = {}) triton_poi_fused_elu_squeeze_7 = async_compile.triton('triton_poi_fused_elu_squeeze_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_squeeze_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_elu_squeeze_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(x0 // 4)) + (64*x2) + (x0 % 4)), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_3, buf1, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_4, buf3, 64, grid=grid(64), stream=stream0) del primals_4 buf4 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_2], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_5, buf5, 64, grid=grid(64), stream=stream0) del primals_5 buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_2], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.add, aten.leaky_relu] triton_poi_fused_add_leaky_relu_3.run(buf4, buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4.run(primals_2, buf7, buf4, buf6, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] 
triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5.run(primals_2, buf7, buf4, buf6, buf8, buf9, 256, grid=grid(256), stream=stream0) del buf4 del buf6 del buf8 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_6.run(buf9, buf10, 256, grid=grid(256), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [node_out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf2, out=buf11) buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [node_out_2, squeeze], Original ATen: [aten.elu, aten.squeeze] triton_poi_fused_elu_squeeze_7.run(buf11, buf12, 256, grid=grid(256), stream=stream0) return (buf12, primals_2, buf7, buf10, buf11, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn import functional as F class GATMutiHeadAttLayer(nn.Module): def __init__(self, in_features, out_features, heads, dropout=0.4, alpha =0.2, concat=True): super(GATMutiHeadAttLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.heads = heads self.alpha = alpha self.concat = concat self.gain = nn.init.calculate_gain('leaky_relu', self.alpha) self.leakyrelu = nn.LeakyReLU(self.alpha) self.W = nn.Parameter(torch.zeros(size=(heads, in_features, out_features))) nn.init.xavier_uniform_(self.W.data, self.gain) self.a_1 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_1.data, self.gain) self.a_2 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_2.data, self.gain) def forward(self, input_seq, adj): if len(input_seq.size()) == 2: input_seq = torch.unsqueeze(input_seq, 0) adj = torch.unsqueeze(adj, 0) input_seq = torch.unsqueeze(input_seq, 1) adj = torch.unsqueeze(adj, 1) in_size = input_seq.size() nbatchs = in_size[0] slen = in_size[2] h = torch.matmul(input_seq, self.W) f_1 = torch.matmul(h, self.a_1) f_2 = torch.matmul(h, self.a_2) e = f_1.expand(nbatchs, self.heads, slen, slen) + f_2.expand(nbatchs, self.heads, slen, slen).transpose(2, 3) e = self.leakyrelu(e) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj.expand(nbatchs, self.heads, slen, slen) > 0, e, zero_vec) attention = F.softmax(attention, dim=2) attention = F.dropout(attention, self.dropout, training=self.training) node_out = torch.matmul(attention, h) if self.concat: node_out = node_out.transpose(1, 2).contiguous().view(nbatchs, slen, -1) node_out = F.elu(node_out) else: node_out = node_out.mean(1) return node_out.squeeze() def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4, 'heads': 4}]
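# --- Editor's sketch (not part of the original entry): a minimal smoke test
# for the eager layer above, using only the shapes that get_inputs() already
# provides. With concat=True the four heads are stacked along the feature dim.
def _smoke_test_gat_layer():
    layer = GATMutiHeadAttLayer(in_features=4, out_features=4, heads=4)
    input_seq, adj = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    out = layer(input_seq, adj)
    # (batch, slen, heads * out_features) after the concat branch
    assert out.shape == (4, 4, 16)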
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x3), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + (4 + x0 + 16 * x3), xmask).to(tl.int1) tmp15 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (8 + x0 + 16 * x3), xmask).to(tl.int1) tmp24 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr1 + (12 + x0 + 16 * x3), xmask).to(tl.int1) tmp33 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 0.2 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = 
tl.where(tmp2, tmp9, tmp10) tmp13 = tmp12 > tmp1 tmp16 = tmp15 + tmp5 tmp17 = tmp16 * tmp7 tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.where(tmp13, tmp18, tmp10) tmp20 = triton_helpers.maximum(tmp11, tmp19) tmp22 = tmp21 > tmp1 tmp25 = tmp24 + tmp5 tmp26 = tmp25 * tmp7 tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp10) tmp29 = triton_helpers.maximum(tmp20, tmp28) tmp31 = tmp30 > tmp1 tmp34 = tmp33 + tmp5 tmp35 = tmp34 * tmp7 tmp36 = tl.where(tmp32, tmp34, tmp35) tmp37 = tl.where(tmp31, tmp36, tmp10) tmp38 = triton_helpers.maximum(tmp29, tmp37) tl.store(out_ptr0 + x4, tmp38, xmask) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 x0 = xindex % 4 x7 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + x5, xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + 4 * x7), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr4 + (x0 + 4 * x7), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 0.2 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp11 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_elu_squeeze_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (x0 // 4) + 64 * x2 + x0 % 4), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_3, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](primals_4, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](primals_5, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_3[grid(256)](buf4, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4[grid(64)]( primals_2, buf7, buf4, buf6, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf1 del buf1 triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5[grid(256)]( primals_2, buf7, buf4, buf6, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del buf6 del buf8 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_6[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0) del buf9 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf2, out=buf11) buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_elu_squeeze_7[grid(256)](buf11, buf12, 256, XBLOCK =128, num_warps=4, num_stages=1) return buf12, primals_2, buf7, buf10, buf11, reinterpret_tensor(buf2, ( 16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0) class GATMutiHeadAttLayerNew(nn.Module): def __init__(self, in_features, out_features, heads, 
dropout=0.4, alpha =0.2, concat=True): super(GATMutiHeadAttLayerNew, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.heads = heads self.alpha = alpha self.concat = concat self.gain = nn.init.calculate_gain('leaky_relu', self.alpha) self.leakyrelu = nn.LeakyReLU(self.alpha) self.W = nn.Parameter(torch.zeros(size=(heads, in_features, out_features))) nn.init.xavier_uniform_(self.W.data, self.gain) self.a_1 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_1.data, self.gain) self.a_2 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_2.data, self.gain) def forward(self, input_0, input_1): primals_1 = self.W primals_4 = self.a_1 primals_5 = self.a_2 primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
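# --- Editor's sketch, assuming a CUDA device (the kernels above are compiled
# for cuda:0). The wrapper takes two (4, 4, 4) tensors like the eager layer
# and returns the fused kernels' concatenated-head output.
def _smoke_test_gat_compiled():
    if not torch.cuda.is_available():
        return
    layer = GATMutiHeadAttLayerNew(in_features=4, out_features=4, heads=4).cuda()
    seq = torch.rand(4, 4, 4, device='cuda')
    adj = torch.rand(4, 4, 4, device='cuda')
    assert layer(seq, adj).shape == (4, 4, 16)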
gitubee/pyGAT
GATMutiHeadAttLayer
false
10157
[ "MIT" ]
0
bc4cc2b6565b7f2ad99daf88013207f64991c273
https://github.com/gitubee/pyGAT/tree/bc4cc2b6565b7f2ad99daf88013207f64991c273
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [logp], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logp => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/s5/cs5wshnrdka3xma3btqijhothwpkw4ctmtyvsdzkv6seotnt4jpf.py # Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean] # Source node to ATen node mapping: # logp => exp, log, mul, neg, sub_1, sum_1, sum_2 # loss => mul_1 # mean => mean # neg => neg_1 # p => exp_1 # pow_1 => pow_1 # sub => sub_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': 
['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = -tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = 1.0 tmp31 = tmp30 - tmp29 tmp32 = tmp30 * tmp27 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 64.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logp], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean] triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class FocalLoss(nn.Module): def __init__(self, gamma=0, eps=1e-07): super(FocalLoss, self).__init__() self.gamma = gamma self.eps = eps self.ce = torch.nn.CrossEntropyLoss(reduction='none') def forward(self, input, target): logp = self.ce(input, target) p = torch.exp(-logp) loss = (1 - p) ** self.gamma * logp return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
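# --- Editor's note (illustrative, not from the original entry): with the
# default gamma=0 the modulating factor (1 - p) ** gamma is identically 1, so
# this focal loss reduces to the mean per-element cross-entropy. Recent
# PyTorch (>= 1.10) accepts probability targets, which get_inputs() relies on.
def _check_focal_reduces_to_ce():
    criterion = FocalLoss(gamma=0)
    logits, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    ce = nn.CrossEntropyLoss(reduction='none')(logits, target).mean()
    assert torch.allclose(criterion(logits, target), ce)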
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = -tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = 1.0 tmp30 - tmp29 tmp32 = tmp30 * tmp27 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 64.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)]( buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf3, class FocalLossNew(nn.Module): def __init__(self, gamma=0, eps=1e-07): 
super(FocalLossNew, self).__init__() self.gamma = gamma self.eps = eps self.ce = torch.nn.CrossEntropyLoss(reduction='none') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
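# --- Editor's sketch, assuming CUDA and that the eager FocalLoss reference
# implementation above is in scope. The generated wrapper feeds arg1_1 to the
# log-softmax kernel (the logits) and uses arg0_1 as the target weights, so
# `call` is exercised here as call([target, logits]).
def _check_focal_compiled():
    if not torch.cuda.is_available():
        return
    logits = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    eager = FocalLoss()(logits, target)
    compiled, = call([target, logits])
    assert torch.allclose(eager, compiled, atol=1e-5)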
h8c2/kaggle-landmark-recognition-2020-1st-place
FocalLoss
false
10158
[ "MIT" ]
0
3285b6c9548d100b14800ea3927f5974b25facd9
https://github.com/h8c2/kaggle-landmark-recognition-2020-1st-place/tree/3285b6c9548d100b14800ea3927f5974b25facd9
BasicNN
# AOT ID: ['1_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wm/cwmlpspwkzmiv4efvkrjowkoawwpe42tnpzkiacrjumkzfjsy3qc.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div] # Source node to ATen node mapping: # x_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, 255.0), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sp/cspbvdtjzgrogiqrdvxgtnc5djfqn22gfjy4t6u5773zfamx7rey.py 
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div_1, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (2, 784), (784, 1)) assert_size_stride(primals_3, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 28, 28), (784, 1, 28, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 
784, grid=grid(784), stream=stream0) del primals_1 buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (1, 784), (0, 1), 0), reinterpret_tensor(primals_2, (784, 2), (1, 784), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf4 = empty_strided_cuda((1, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf1, buf4, 1, 2, grid=grid(1), stream=stream0) del buf1 return (buf4, reinterpret_tensor(buf0, (1, 784), (784, 1), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
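One detail worth pausing on in the wrapper above: the source operation is x / 255.0, but Inductor folds the division into a multiplication by the reciprocal, which is the literal 0.00392156862745098 inside triton_poi_fused_div_0. A one-line check that this constant is exactly Python's repr of 1/255:

print(1.0 / 255.0)  # 0.00392156862745098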
import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F


class BasicNN(nn.Module):

    def __init__(self):
        super(BasicNN, self).__init__()
        self.net = nn.Linear(28 * 28, 2)

    def forward(self, x):
        if type(x) == np.ndarray:
            x = torch.from_numpy(x)
        x = x.float()
        x = Variable(x)
        x = x.view(1, 1, 28, 28)
        x = x / 255.0
        batch_size = x.size(0)
        x = x.view(batch_size, -1)
        output = self.net(x.float())
        # explicit dim: the implicit softmax dim is deprecated, and the traced
        # graph above reduces (amax) over dim 1
        return F.softmax(output, dim=1)


def get_inputs():
    return [torch.rand([1, 1, 28, 28])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (2, 784), (784, 1)) assert_size_stride(primals_3, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 28, 28), (784, 1, 28, 1), torch. float32) get_raw_stream(0) triton_poi_fused_div_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (1, 784), (0, 1), 0), reinterpret_tensor(primals_2, (784, 2), (1, 784), 0 ), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf4 = empty_strided_cuda((1, 2), (2, 1), torch.float32) triton_per_fused__softmax_1[grid(1)](buf1, buf4, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf4, reinterpret_tensor(buf0, (1, 784), (784, 1), 0), buf4 class BasicNNNew(nn.Module): def __init__(self): super(BasicNNNew, self).__init__() self.net = nn.Linear(28 * 28, 2) def forward(self, input_0): primals_2 = self.net.weight primals_3 = self.net.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
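A minimal sanity-check sketch, assuming a CUDA device and that both BasicNN and BasicNNNew above are in scope; the seed and tolerance are illustrative choices, not part of the dataset:

import torch

torch.manual_seed(0)
eager = BasicNN().cuda().eval()
compiled = BasicNNNew().cuda().eval()
# Share the same Linear weights so any difference comes from the kernels alone.
compiled.net.load_state_dict(eager.net.state_dict())
x = torch.rand(1, 1, 28, 28, device='cuda')
with torch.no_grad():
    ref = eager(x)     # eager path: view -> /255 -> linear -> softmax
    out = compiled(x)  # compiled path: fused div kernel + addmm + fused softmax kernel
assert torch.allclose(ref, out, atol=1e-5)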
gtfierro/clipper
BasicNN
false
10,159
[ "Apache-2.0" ]
0
88d7c238d51d5cf66d118bffca0c17edee84755e
https://github.com/gtfierro/clipper/tree/88d7c238d51d5cf66d118bffca0c17edee84755e
PositionalEncoding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/i5/ci5qztmjm5trfof5nnr4q6zfdgt5kz3i6kpes2s3qrkgziadh2ag.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.add] # Source node to ATen node mapping: # x => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %slice_2), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 5000, 4), (20000, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 5000, 4), (20000, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class PositionalEncoding(nn.Module): """Implement the PE function.""" def __init__(self, d_model, dropout, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = nn.Parameter(torch.randn(1, max_len, d_model)) self.register_parameter('pe', pe) def forward(self, x): x = x + self.pe[:, :x.size(1)] return self.dropout(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'dropout': 0.5}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 5000, 4), (20000, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class PositionalEncodingNew(nn.Module): """Implement the PE function.""" def __init__(self, d_model, dropout, max_len=5000): super(PositionalEncodingNew, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = nn.Parameter(torch.randn(1, max_len, d_model)) self.register_parameter('pe', pe) def forward(self, input_0): primals_1 = self.pe primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
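A similar hedged equivalence sketch (CUDA assumed, both classes above in scope). Note that the compiled forward never invokes self.dropout, so the two modules only agree in eval() mode, where dropout is the identity:

import torch

eager = PositionalEncoding(d_model=4, dropout=0.5).cuda().eval()
compiled = PositionalEncodingNew(d_model=4, dropout=0.5).cuda().eval()
with torch.no_grad():
    compiled.pe.copy_(eager.pe)  # identical positional parameters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # The fused kernel reproduces the broadcast x + pe[:, :x.size(1)].
    assert torch.allclose(eager(x), compiled(x))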
hedinang/ocr2
PositionalEncoding
false
10,160
[ "MIT" ]
0
09cc4c71190e900c6ad5aba9485a804139281fec
https://github.com/hedinang/ocr2/tree/09cc4c71190e900c6ad5aba9485a804139281fec
NormalizationLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5d/c5dhk7bkuqo4u6hoo6g4wm36lekpofplwrblvshjxay6uhmurm2w.py # Topologically Sorted Source Nodes: [mul, features], Original ATen: [aten.mul, aten.div] # Source node to ATen node mapping: # features => div # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %expand), kwargs = {}) triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + 
(0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp3 / tmp15 tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, features], Original ATen: [aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class NormalizationLayer(torch.nn.Module): """Class for normalization layer.""" def __init__(self, normalize_scale=1.0, learn_scale=True): super(NormalizationLayer, self).__init__() self.norm_s = float(normalize_scale) if learn_scale: self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,))) def forward(self, x): features = self.norm_s * x / torch.norm(x, dim=1, keepdim=True ).expand_as(x) return features def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
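One edge case in the eager code above: there is no epsilon in the denominator, so an input that is all zero along dim=1 divides 0 by 0 and produces NaNs. A tiny CPU-only illustration:

import torch

x = torch.zeros(1, 4, 1, 1)
print(x / torch.norm(x, dim=1, keepdim=True))  # nan everywhere: 0 / 0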
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp3 / tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class NormalizationLayerNew(torch.nn.Module): """Class for normalization layer.""" def __init__(self, normalize_scale=1.0, learn_scale=True): super(NormalizationLayerNew, self).__init__() self.norm_s = float(normalize_scale) if learn_scale: self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,))) def forward(self, input_0): primals_1 = self.norm_s primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
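A hedged equivalence sketch for this record (CUDA assumed, classes above in scope); the atol is an arbitrary choice:

import torch

eager = NormalizationLayer().cuda()
compiled = NormalizationLayerNew().cuda()
with torch.no_grad():
    compiled.norm_s.copy_(eager.norm_s)  # both default to 1.0; copied for clarity
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = eager(x)     # norm_s * x / torch.norm(x, dim=1, keepdim=True)
    out = compiled(x)  # single fused mul/div Triton kernel
    assert torch.allclose(ref, out, atol=1e-6)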
hmtrii/tirg
NormalizationLayer
false
10,161
[ "Apache-2.0" ]
0
e404020795bb46fb01b6bd82a2618f9370174012
https://github.com/hmtrii/tirg/tree/e404020795bb46fb01b6bd82a2618f9370174012
LabelSmoothCrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [lsm], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # lsm => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + 
(4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xz/cxzrhhdsigxzd7l33hed2jl7lxvtwhqe4lxz7hmqo775cqjakfzl.py # Topologically Sorted Source Nodes: [targets, lsm, mul, sum_1, loss, loss_1], Original ATen: [aten.scatter, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean] # Source node to ATen node mapping: # loss => neg # loss_1 => mean # lsm => exp, log, sub_1, sum_1 # mul => mul # sum_1 => sum_2 # targets => scatter_upon_const_tensor # Graph fragment: # %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: 0.0, dtype: torch.float32, dim: 1, selector: %unsqueeze, val: 1.0}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_upon_const_tensor, %sub_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {}) triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (4*r2), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*r2)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (2 + (4*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (3 + (4*r2)), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp7 = tl_math.exp(tmp6) tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = tmp6 - tmp17 tmp19 = tmp5 * tmp18 tmp20 = tl.full([1, 1], 1, tl.int64) tmp21 = tmp0 == tmp20 tmp22 = tl.where(tmp21, tmp3, tmp4) tmp23 = tmp8 - tmp17 tmp24 = tmp22 * tmp23 tmp25 = tmp19 + tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp0 == tmp26 tmp28 = tl.where(tmp27, tmp3, tmp4) tmp29 = tmp11 - tmp17 tmp30 = tmp28 * tmp29 tmp31 = tmp25 + tmp30 tmp32 = tl.full([1, 1], 3, tl.int64) tmp33 = tmp0 == tmp32 tmp34 = tl.where(tmp33, tmp3, tmp4) tmp35 = tmp14 - tmp17 tmp36 = tmp34 * tmp35 tmp37 = tmp31 + tmp36 tmp38 = -tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lsm], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [targets, lsm, mul, sum_1, loss, loss_1], Original ATen: [aten.scatter, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean] triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1.run(buf3, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class LabelSmoothCrossEntropyLoss(nn.modules.loss._WeightedLoss): def __init__(self, weight=None, reduction='mean', smoothing=0.0): super().__init__(weight=weight, reduction=reduction) self.smoothing = smoothing self.weight = weight self.reduction = reduction @staticmethod def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int', smoothing=0.0): assert 0 <= smoothing < 1 with torch.no_grad(): targets = torch.empty(size=(targets.size(0), n_classes), device =targets.device).fill_(smoothing / (n_classes - 1)).scatter_( 1, targets.data.unsqueeze(1), 1.0 - smoothing) return targets def forward(self, inputs, targets): targets = LabelSmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.size(-1), self.smoothing) lsm = F.log_softmax(inputs, -1) if self.weight is not None: lsm = lsm * self.weight.unsqueeze(0) loss = -(targets * lsm).sum(-1) if self.reduction == 'sum': loss = loss.sum() elif self.reduction == 'mean': loss = loss.mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {}]
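The smoothing step is easy to verify by hand: with n_classes = 4 and smoothing = 0.3, every off-target entry gets 0.3 / 3 = 0.1 and the target entry gets 1 - 0.3 = 0.7, and with smoothing = 0.0 the loss reduces to plain cross-entropy. A small CPU-only check:

import torch
import torch.nn.functional as F

targets = torch.tensor([2, 0])
smoothed = LabelSmoothCrossEntropyLoss._smooth_one_hot(targets, 4, smoothing=0.3)
assert torch.allclose(smoothed, torch.tensor([[0.1, 0.1, 0.7, 0.1],
                                              [0.7, 0.1, 0.1, 0.1]]))

x = torch.randn(2, 4)
crit = LabelSmoothCrossEntropyLoss(smoothing=0.0)
assert torch.allclose(crit(x, targets), F.cross_entropy(x, targets))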
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 4 * r2, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * r2), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (2 + 4 * r2), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (3 + 4 * r2), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp7 = tl_math.exp(tmp6) tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = tmp6 - tmp17 tmp19 = tmp5 * tmp18 tmp20 = tl.full([1, 1], 1, tl.int64) tmp21 = tmp0 == tmp20 tmp22 = tl.where(tmp21, tmp3, tmp4) tmp23 = tmp8 - tmp17 tmp24 = tmp22 * tmp23 tmp25 = tmp19 + tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp0 == tmp26 tmp28 = tl.where(tmp27, tmp3, tmp4) tmp29 = tmp11 - tmp17 tmp30 = tmp28 * tmp29 tmp31 = tmp25 + tmp30 tmp32 = tl.full([1, 1], 3, tl.int64) tmp33 = tmp0 == tmp32 tmp34 = tl.where(tmp33, tmp3, tmp4) tmp35 = tmp14 - tmp17 tmp36 = tmp34 * tmp35 tmp37 = tmp31 + tmp36 tmp38 = -tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 
triton_per_fused__log_softmax_mean_mul_neg_scatter_sum_1[grid(1)](buf3, arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class LabelSmoothCrossEntropyLossNew(nn.modules.loss._WeightedLoss): def __init__(self, weight=None, reduction='mean', smoothing=0.0): super().__init__(weight=weight, reduction=reduction) self.smoothing = smoothing self.weight = weight self.reduction = reduction @staticmethod def _smooth_one_hot(targets: 'torch.Tensor', n_classes: 'int', smoothing=0.0): assert 0 <= smoothing < 1 with torch.no_grad(): targets = torch.empty(size=(targets.size(0), n_classes), device =targets.device).fill_(smoothing / (n_classes - 1)).scatter_( 1, targets.data.unsqueeze(1), 1.0 - smoothing) return targets def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
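The compiled pair can then be checked against the eager loss on exactly the traced shapes that the wrapper asserts, (4, 4, 4, 4) float logits and (4,) int64 targets (CUDA assumed; the tolerance is illustrative):

import torch

crit_ref = LabelSmoothCrossEntropyLoss()
crit_tri = LabelSmoothCrossEntropyLossNew()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.randint(0, 4, (4,), device='cuda')
assert torch.allclose(crit_ref(x, y), crit_tri(x, y), atol=1e-6)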
gosiqueira/dog-breed-recognition
LabelSmoothCrossEntropyLoss
false
10,162
[ "MIT" ]
0
27d3499f4922e6e36219f47af08c34e30c929e12
https://github.com/gosiqueira/dog-breed-recognition/tree/27d3499f4922e6e36219f47af08c34e30c929e12
SoftArgmax2D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7d/c7dubrsigenrd2jmd6iwmxj4ghxpnyetwaywlk3uefyl3icppnek.py # Topologically Sorted Source Nodes: [max_1, sub, exp_x, mul_2, sum_1, add, exp_x_sum, mul_3, expected_x, mul, mul_1, expected_y], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum, aten.add, aten.reciprocal] # Source node to ATen node mapping: # add => add # exp_x => exp # exp_x_sum => mul, reciprocal # expected_x => sum_3 # expected_y => sum_2 # max_1 => max_1 # mul => mul_5 # mul_1 => mul_6 # mul_2 => mul_7 # mul_3 => mul_8 # sub => sub # sum_1 => sum_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, -1, True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem), kwargs = {}) # %exp : [num_users=3] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %exp), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1], True), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %exp), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %mul), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1], True), kwargs = {}) triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = r1 % 4 tmp12 = tmp11.to(tl.float32) tmp13 = 2.0 tmp14 = tmp12 < tmp13 tmp15 = 0.6666666666666666 tmp16 = tmp12 * tmp15 tmp17 = -1.0 tmp18 = tmp16 + tmp17 tmp19 = 3 + ((-1)*(r1 % 4)) tmp20 = tmp19.to(tl.float32) tmp21 = tmp20 * tmp15 tmp22 = 1.0 tmp23 = tmp22 - tmp21 tmp24 = tl.where(tmp14, tmp18, tmp23) tmp25 = tmp24 * tmp6 tmp26 = 1e-06 tmp27 = tmp10 + tmp26 tmp28 = tl.full([1, 1], 1, tl.int32) tmp29 = tmp28 / tmp27 tmp30 = tmp29 * tmp22 tmp31 = tmp25 * tmp30 tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp34 = tl.where(xmask, tmp32, 0) tmp35 = tl.sum(tmp34, 1)[:, None] tmp36 = (r1 // 4) tmp37 = tmp36.to(tl.float32) tmp38 = tmp37 < tmp13 tmp39 = tmp37 * tmp15 tmp40 = tmp39 + tmp17 tmp41 = 3 + ((-1)*(r1 // 4)) tmp42 = tmp41.to(tl.float32) tmp43 = tmp42 * tmp15 tmp44 = tmp22 - tmp43 tmp45 = tl.where(tmp38, tmp40, tmp44) tmp46 = tmp45 * tmp6 tmp47 = tmp46 * tmp30 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tl.store(out_ptr2 + (2*x0), tmp35, xmask) tl.store(out_ptr3 + (2*x0), tmp51, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), 
torch.float32) buf3 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 0) # alias buf4 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 1) # alias # Topologically Sorted Source Nodes: [max_1, sub, exp_x, mul_2, sum_1, add, exp_x_sum, mul_3, expected_x, mul, mul_1, expected_y], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum, aten.add, aten.reciprocal] stream0 = get_raw_stream(0) triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0.run(arg0_1, buf3, buf4, 16, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from typing import Optional


def create_meshgrid(x: 'torch.Tensor', normalized_coordinates: 'Optional[bool]'
    ) ->torch.Tensor:
    assert len(x.shape) == 4, x.shape
    _, _, height, width = x.shape
    _device, _dtype = x.device, x.dtype
    if normalized_coordinates:
        xs = torch.linspace(-1.0, 1.0, width, device=_device, dtype=_dtype)
        ys = torch.linspace(-1.0, 1.0, height, device=_device, dtype=_dtype)
    else:
        xs = torch.linspace(0, width - 1, width, device=_device, dtype=_dtype)
        ys = torch.linspace(0, height - 1, height, device=_device, dtype=_dtype
            )
    return torch.meshgrid(ys, xs)


class SoftArgmax2D(nn.Module):
    """Creates a module that computes the Spatial Soft-Argmax 2D of a given input heatmap.

    Returns the index of the maximum 2d coordinates of the given map.
    The output order is x-coord and y-coord.

    Arguments:
        normalized_coordinates (Optional[bool]): whether to return the
            coordinates normalized in the range of [-1, 1]. Otherwise,
            it will return the coordinates in the range of the input shape.
            Default is True.

    Shape:
        - Input: :math:`(B, N, H, W)`
        - Output: :math:`(B, N, 2)`

    Examples::
        >>> input = torch.rand(1, 4, 2, 3)
        >>> m = tgm.losses.SpatialSoftArgmax2d()
        >>> coords = m(input)  # 1x4x2
        >>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
    """

    def __init__(self, normalized_coordinates: 'Optional[bool]'=True) ->None:
        super(SoftArgmax2D, self).__init__()
        self.normalized_coordinates: 'Optional[bool]' = normalized_coordinates
        self.eps: 'float' = 1e-06

    def forward(self, input: 'torch.Tensor') ->torch.Tensor:
        if not torch.is_tensor(input):
            raise TypeError('Input type is not a torch.Tensor. Got {}'.
                format(type(input)))
        if not len(input.shape) == 4:
            raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'
                .format(input.shape))
        batch_size, channels, _height, _width = input.shape
        x: 'torch.Tensor' = input.view(batch_size, channels, -1)
        exp_x = torch.exp(x - torch.max(x, dim=-1, keepdim=True)[0])
        exp_x_sum = 1.0 / (exp_x.sum(dim=-1, keepdim=True) + self.eps)
        pos_y, pos_x = create_meshgrid(input, self.normalized_coordinates)
        pos_x = pos_x.reshape(-1)
        pos_y = pos_y.reshape(-1)
        expected_y: 'torch.Tensor' = torch.sum(pos_y * exp_x * exp_x_sum,
            dim=-1, keepdim=True)
        expected_x: 'torch.Tensor' = torch.sum(pos_x * exp_x * exp_x_sum,
            dim=-1, keepdim=True)
        output: 'torch.Tensor' = torch.cat([expected_x, expected_y], dim=-1)
        return output.view(batch_size, channels, 2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
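It is worth connecting the eager meshgrid to the kernel constants above: the literals 0.6666666666666666 and -1.0 in triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0 are the step and origin of the normalized grid, since for width 4, torch.linspace(-1, 1, 4) has spacing 2/3. A one-line check:

import torch

print(torch.linspace(-1.0, 1.0, 4))  # tensor([-1.0000, -0.3333,  0.3333,  1.0000])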
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = r1 % 4 tmp12 = tmp11.to(tl.float32) tmp13 = 2.0 tmp14 = tmp12 < tmp13 tmp15 = 0.6666666666666666 tmp16 = tmp12 * tmp15 tmp17 = -1.0 tmp18 = tmp16 + tmp17 tmp19 = 3 + -1 * (r1 % 4) tmp20 = tmp19.to(tl.float32) tmp21 = tmp20 * tmp15 tmp22 = 1.0 tmp23 = tmp22 - tmp21 tmp24 = tl.where(tmp14, tmp18, tmp23) tmp25 = tmp24 * tmp6 tmp26 = 1e-06 tmp27 = tmp10 + tmp26 tmp28 = tl.full([1, 1], 1, tl.int32) tmp29 = tmp28 / tmp27 tmp30 = tmp29 * tmp22 tmp31 = tmp25 * tmp30 tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp34 = tl.where(xmask, tmp32, 0) tmp35 = tl.sum(tmp34, 1)[:, None] tmp36 = r1 // 4 tmp37 = tmp36.to(tl.float32) tmp38 = tmp37 < tmp13 tmp39 = tmp37 * tmp15 tmp40 = tmp39 + tmp17 tmp41 = 3 + -1 * (r1 // 4) tmp42 = tmp41.to(tl.float32) tmp43 = tmp42 * tmp15 tmp44 = tmp22 - tmp43 tmp45 = tl.where(tmp38, tmp40, tmp44) tmp46 = tmp45 * tmp6 tmp47 = tmp46 * tmp30 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tl.store(out_ptr2 + 2 * x0, tmp35, xmask) tl.store(out_ptr3 + 2 * x0, tmp51, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf3 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 0) buf4 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 1) get_raw_stream(0) triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0[grid(16)](arg0_1, buf3, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf5, def create_meshgrid(x: 'torch.Tensor', normalized_coordinates: 'Optional[bool]' ) ->torch.Tensor: assert len(x.shape) == 4, x.shape _, _, height, width = x.shape _device, _dtype = x.device, x.dtype if normalized_coordinates: xs = torch.linspace(-1.0, 1.0, width, device=_device, dtype=_dtype) ys = torch.linspace(-1.0, 1.0, height, device=_device, dtype=_dtype) else: xs = torch.linspace(0, width - 1, width, device=_device, dtype=_dtype) ys = torch.linspace(0, height - 1, height, device=_device, dtype=_dtype ) return torch.meshgrid(ys, xs) class SoftArgmax2DNew(nn.Module): """Creates a module that computes the Spatial Soft-Argmax 2D of a given input heatmap. 
    Returns the index of the maximum 2d coordinates of the given map.
    The output order is x-coord and y-coord.

    Arguments:
        normalized_coordinates (Optional[bool]): whether to return the
            coordinates normalized in the range of [-1, 1]. Otherwise,
            it will return the coordinates in the range of the input shape.
            Default is True.

    Shape:
        - Input: :math:`(B, N, H, W)`
        - Output: :math:`(B, N, 2)`

    Examples::
        >>> input = torch.rand(1, 4, 2, 3)
        >>> m = tgm.losses.SpatialSoftArgmax2d()
        >>> coords = m(input)  # 1x4x2
        >>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
    """

    def __init__(self, normalized_coordinates: 'Optional[bool]'=True) ->None:
        super(SoftArgmax2DNew, self).__init__()
        self.normalized_coordinates: 'Optional[bool]' = normalized_coordinates
        self.eps: 'float' = 1e-06

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
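A final hedged sketch (CUDA assumed, classes above in scope): the compiled module matches the eager one on the traced (4, 4, 4, 4) shape, and a sharp peak makes the output convention visible, x-coordinate first, in normalized [-1, 1] coordinates:

import torch

m_ref = SoftArgmax2D().cuda()
m_tri = SoftArgmax2DNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m_ref(x), m_tri(x), atol=1e-6)

peak = torch.full((4, 4, 4, 4), -10.0, device='cuda')
peak[:, :, 1, 3] = 10.0  # hot pixel at row 1, col 3
print(m_tri(peak)[0, 0])  # ~tensor([1.0000, -0.3333]): col 3 -> x = 1, row 1 -> y = -1/3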
godspeed5/Human-Path-Prediction
SoftArgmax2D
false
10,163
[ "MIT" ]
0
1f451f3750fbd4e37a567f1574cfea1456608be8
https://github.com/godspeed5/Human-Path-Prediction/tree/1f451f3750fbd4e37a567f1574cfea1456608be8
GAT
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wm/cwm4p6i4onfgzirdknbkkkkalyi643y6ynzifzggmplzobgfpkd4.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] # Source node to ATen node mapping: # h => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] # Source node to ATen node mapping: # h => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ly/clyfgwr3kgfvzdrx7xlpotc6oloj5h6navonpmk2vckseyw4usqc.py # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # f_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': 
False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y3/cy3wm5wimywhlsxa7hk245bnhzixbek5lhjzyfdljso5bghiwcxe.py # Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.add, aten.leaky_relu] # Source node to ATen node mapping: # e => add # e_1 => gt # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_6, %permute), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7e/c7eahtu74tabuwhtmhir4xoeioy74v7443cegtljvfw5qmvjo2bi.py # Topologically Sorted Source 
Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => amax # e => add # e_1 => mul, where # gt => gt_1 # zero_vec => full_default # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_6, %permute), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%expand_8, 0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [2], True), kwargs = {}) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4 = async_compile.triton('triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (16*x3)), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + (4*x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x4), xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (4 + x0 + (16*x3)), xmask).to(tl.int1) tmp15 = 
tl.load(in_ptr2 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (8 + x0 + (16*x3)), xmask).to(tl.int1) tmp24 = tl.load(in_ptr2 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (12 + x0 + (16*x3)), xmask).to(tl.int1) tmp33 = tl.load(in_ptr2 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp12 > tmp1 tmp16 = tmp15 + tmp5 tmp17 = tmp16 * tmp7 tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.where(tmp13, tmp18, tmp10) tmp20 = triton_helpers.maximum(tmp11, tmp19) tmp22 = tmp21 > tmp1 tmp25 = tmp24 + tmp5 tmp26 = tmp25 * tmp7 tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp10) tmp29 = triton_helpers.maximum(tmp20, tmp28) tmp31 = tmp30 > tmp1 tmp34 = tmp33 + tmp5 tmp35 = tmp34 * tmp7 tmp36 = tl.where(tmp32, tmp34, tmp35) tmp37 = tl.where(tmp31, tmp36, tmp10) tmp38 = triton_helpers.maximum(tmp29, tmp37) tl.store(out_ptr0 + (x4), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lj/cljxuntmnenxoatyfkl2wnultawghu7fzbpet65xyrqfalyseezv.py # Topologically Sorted Source Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => exp, sub # e => add # e_1 => mul, where # gt => gt_1 # zero_vec => full_default # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_6, %permute), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%expand_8, 0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5 = async_compile.triton('triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x4 = xindex % 16 x5 = xindex x6 = (xindex // 4) x0 = xindex % 4 x7 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + (4*x7)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0 + (4*x7)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp11 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ry/cryfskax4soldfj4gnwnzu4snp73auwk43upvbh6hu5xizgjdsfb.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6o/c6ok77ksgas3rstbkha5rrfp43acarpyyv55flibh7fcam4rgxlj.py # Topologically Sorted Source Nodes: [node_out_2], Original ATen: [aten.elu] # Source node to ATen node mapping: # node_out_2 => expm1, gt_2, mul_2, mul_4, where_2 # Graph fragment: # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_12, 0), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_2, %mul_4), kwargs = {}) triton_poi_fused_elu_7 = async_compile.triton('triton_poi_fused_elu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex 
% 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(x0 // 4)) + (64*x2) + (x0 % 4)), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6o/c6ohqgr2tjtrit3z5tdoilakx57zkmsldaneqh5bdciz3c2leiff.py # Topologically Sorted Source Nodes: [e_2, e_3], Original ATen: [aten.add, aten.leaky_relu] # Source node to ATen node mapping: # e_2 => add_1 # e_3 => gt_3 # Graph fragment: # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_17, %permute_2), kwargs = {}) # %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_8 = async_compile.triton('triton_poi_fused_add_leaky_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/h6/ch6wjp52qyiehk6g5e6ho65vlfmvcxbxeeixzsyf5fvrnbkmkmas.py # Topologically Sorted Source Nodes: [e_2, e_3, zero_vec_1, gt_1, attention_3, attention_4], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention_3 => where_4 # attention_4 => amax_1 # e_2 => add_1 # e_3 => mul_5, where_3 # gt_1 => gt_4 # zero_vec_1 => full_default_1 # Graph fragment: # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_17, %permute_2), kwargs = {}) # %mul_5 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %add_1, %mul_5), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%expand_19, 0), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %where_3, %full_default_1), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [2], True), kwargs = {}) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9 = async_compile.triton('triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp14 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask).to(tl.int1) tmp15 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp23 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask).to(tl.int1) tmp24 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp32 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask).to(tl.int1) tmp33 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 
4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp12 > tmp1 tmp16 = tmp15 + tmp5 tmp17 = tmp16 * tmp7 tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.where(tmp13, tmp18, tmp10) tmp20 = triton_helpers.maximum(tmp11, tmp19) tmp22 = tmp21 > tmp1 tmp25 = tmp24 + tmp5 tmp26 = tmp25 * tmp7 tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp10) tmp29 = triton_helpers.maximum(tmp20, tmp28) tmp31 = tmp30 > tmp1 tmp34 = tmp33 + tmp5 tmp35 = tmp34 * tmp7 tmp36 = tl.where(tmp32, tmp34, tmp35) tmp37 = tl.where(tmp31, tmp36, tmp10) tmp38 = triton_helpers.maximum(tmp29, tmp37) tl.store(out_ptr0 + (x2), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ku/ckup5sgxhixvuua4azlt3kif75tgdsyr53z5uf37coenvczsrbx7.py # Topologically Sorted Source Nodes: [e_2, e_3, zero_vec_1, gt_1, attention_3, attention_4], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention_3 => where_4 # attention_4 => exp_1, sub_1 # e_2 => add_1 # e_3 => mul_5, where_3 # gt_1 => gt_4 # zero_vec_1 => full_default_1 # Graph fragment: # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_17, %permute_2), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %add_1, %mul_5), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%expand_19, 0), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %where_3, %full_default_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10 = async_compile.triton('triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr1 + (x3), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp11 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + (x3), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3z/c3zgnuanxwcgu7p7l5olmm6aovhopbgtqwuvx4bd776p4q3lrxvd.py # Topologically Sorted Source Nodes: [attention_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_4 => div_1, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) triton_poi_fused__softmax_11 = async_compile.triton('triton_poi_fused__softmax_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + 
(x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zr/czrpykxj37m4yvh3xamnf2tacephngho3gjq2lbnvpgpkd3euq52.py # Topologically Sorted Source Nodes: [node_out_4, squeeze_1, x_3, log_softmax], Original ATen: [aten.mean, aten.squeeze, aten.elu, aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax_2, exp_2, sub_2, sum_3 # node_out_4 => mean # squeeze_1 => squeeze_1 # x_3 => expm1_1, gt_5, mul_7, mul_9, where_5 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_24, [1]), kwargs = {}) # %squeeze_1 : [num_users=2] = call_function[target=torch.ops.aten.squeeze.default](args = (%mean,), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_1, 0), kwargs = {}) # %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_7,), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %mul_7, %mul_9), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_5, [1], True), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_5, %amax_2), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) triton_poi_fused__log_softmax_elu_mean_squeeze_12 = async_compile.triton('triton_poi_fused__log_softmax_elu_mean_squeeze_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_elu_mean_squeeze_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_elu_mean_squeeze_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp25 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = tmp2 * tmp1 tmp6 = libdevice.expm1(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp5, tmp7) tmp10 = tmp9 / tmp1 tmp11 = tmp10 > tmp3 tmp12 = tmp10 * tmp1 tmp13 = libdevice.expm1(tmp12) tmp14 = tmp13 * tmp1 tmp15 = tl.where(tmp11, tmp12, tmp14) tmp16 = triton_helpers.maximum(tmp8, tmp15) tmp18 = tmp17 / tmp1 tmp19 = tmp18 > tmp3 tmp20 = tmp18 * tmp1 tmp21 = libdevice.expm1(tmp20) tmp22 = tmp21 * tmp1 tmp23 = tl.where(tmp19, tmp20, tmp22) tmp24 = triton_helpers.maximum(tmp16, tmp23) tmp26 = tmp25 / tmp1 tmp27 = tmp26 > tmp3 tmp28 = tmp26 * tmp1 tmp29 = libdevice.expm1(tmp28) tmp30 = tmp29 * tmp1 tmp31 = tl.where(tmp27, tmp28, tmp30) tmp32 = triton_helpers.maximum(tmp24, tmp31) tmp33 = tmp8 - tmp32 tmp34 = tl_math.exp(tmp33) tmp35 = tmp15 - tmp32 tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tmp38 = tmp23 - tmp32 tmp39 = tl_math.exp(tmp38) tmp40 = tmp37 + tmp39 tmp41 = tmp31 - tmp32 tmp42 = tl_math.exp(tmp41) tmp43 = tmp40 + tmp42 tl.store(out_ptr0 + (x2), tmp32, xmask) tl.store(out_ptr1 + (x2), tmp43, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/m5/cm5zf4dboj3pmxzkj2a726aanlmjjwiea7agvkgjd6ykwjuttvbx.py # Topologically Sorted Source Nodes: [node_out_4, squeeze_1, x_3, log_softmax], Original ATen: [aten.mean, aten.squeeze, aten.elu, aten._log_softmax] # Source node to ATen node mapping: # log_softmax => log, sub_2, sub_3 # node_out_4 => mean # squeeze_1 => squeeze_1 # x_3 => expm1_1, gt_5, mul_7, mul_9, where_5 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_24, [1]), kwargs = {}) # %squeeze_1 : [num_users=2] = call_function[target=torch.ops.aten.squeeze.default](args = (%mean,), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_1, 0), kwargs = {}) # %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_7,), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %mul_7, %mul_9), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_5, %amax_2), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log), kwargs = {}) triton_poi_fused__log_softmax_elu_mean_squeeze_13 = async_compile.triton('triton_poi_fused__log_softmax_elu_mean_squeeze_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_elu_mean_squeeze_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_elu_mean_squeeze_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp9 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = tmp2 * tmp1 tmp6 = libdevice.expm1(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp5, tmp7) tmp10 = tmp8 - tmp9 tmp12 = tl_math.log(tmp11) tmp13 = tmp10 - tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (1, 16, 4), (64, 4, 1)) assert_size_stride(primals_7, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (1, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_3, buf1, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = 
empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_4, buf3, 64, grid=grid(64), stream=stream0) del primals_4 buf4 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_2], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_5, buf5, 64, grid=grid(64), stream=stream0) del primals_5 buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_2], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.add, aten.leaky_relu] triton_poi_fused_add_leaky_relu_3.run(buf4, buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4.run(primals_2, buf7, buf4, buf6, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [e, e_1, zero_vec, gt, attention, attention_1], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5.run(primals_2, buf7, buf4, buf6, buf8, buf9, 256, grid=grid(256), stream=stream0) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_6.run(buf9, buf10, 256, grid=grid(256), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [node_out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf2, out=buf11) buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [node_out_2], Original ATen: [aten.elu] triton_poi_fused_elu_7.run(buf11, buf12, 256, grid=grid(256), stream=stream0) buf13 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf12, reinterpret_tensor(primals_6, (4, 16, 4), (0, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_3], Original ATen: [aten.bmm] extern_kernels.bmm(buf13, reinterpret_tensor(primals_7, (4, 4, 1), (0, 1, 0), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_4], Original ATen: [aten.bmm] extern_kernels.bmm(buf13, reinterpret_tensor(primals_8, (4, 4, 1), (0, 1, 0), 0), out=buf15) buf16 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_2, e_3], Original ATen: [aten.add, aten.leaky_relu] triton_poi_fused_add_leaky_relu_8.run(buf14, buf15, buf16, 64, grid=grid(64), stream=stream0) buf17 = 
empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [e_2, e_3, zero_vec_1, gt_1, attention_3, attention_4], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9.run(primals_2, buf16, buf14, buf15, buf17, 16, grid=grid(16), stream=stream0) buf18 = reinterpret_tensor(buf6, (4, 1, 4, 4), (16, 64, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [e_2, e_3, zero_vec_1, gt_1, attention_3, attention_4], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10.run(primals_2, buf16, buf14, buf15, buf17, buf18, 64, grid=grid(64), stream=stream0) del buf14 buf19 = reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [attention_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_11.run(buf18, buf19, 64, grid=grid(64), stream=stream0) buf20 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [node_out_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0), buf13, out=buf20) buf21 = reinterpret_tensor(buf17, (4, 1, 4), (4, 16, 1), 0); del buf17 # reuse buf22 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [node_out_4, squeeze_1, x_3, log_softmax], Original ATen: [aten.mean, aten.squeeze, aten.elu, aten._log_softmax] triton_poi_fused__log_softmax_elu_mean_squeeze_12.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [node_out_4, squeeze_1, x_3, log_softmax], Original ATen: [aten.mean, aten.squeeze, aten.elu, aten._log_softmax] triton_poi_fused__log_softmax_elu_mean_squeeze_13.run(buf20, buf21, buf22, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 return (buf23, primals_2, primals_6, primals_7, primals_8, buf7, buf10, buf11, buf16, buf19, buf20, buf23, reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf12, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 16, 4), (64, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
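# ---------------------------------------------------------------------------
# Reference sketch (ours, not part of the generated module): the fused kernels
# triton_poi_fused_add_leaky_relu_3, triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4/5
# and triton_poi_fused__softmax_6 above jointly compute a LeakyReLU on
# f_1 + f_2^T (the traced negative_slope is 4, hence the 4.0 constant), mask
# positions where adj <= 0 with -8999999815811072.0 (the fp32 rounding of
# -9e15), and apply a numerically stable softmax over dim 2 via the
# amax/exp/sum decomposition. Dropout does not appear because the module was
# traced with training=False. The eager-mode equivalent below is a minimal
# sketch for sanity-checking that math; the function name is hypothetical.
import torch

def masked_attention_reference(f_1, f_2, adj, negative_slope=4.0):
    # f_1, f_2: (B, H, S, 1); adj: broadcastable to (B, H, S, S)
    # e[b, h, i, j] = f_1[b, h, i] + f_2[b, h, j]  (broadcasted outer sum)
    e = f_1 + f_2.transpose(2, 3)
    # LeakyReLU written the way the compiler lowers it: where(e > 0, e, slope * e)
    e = torch.where(e > 0, e, negative_slope * e)
    # Non-edges get a huge negative logit so softmax drives them to ~0
    attention = torch.where(adj > 0, e, torch.full_like(e, -8999999815811072.0))
    # Stable softmax over dim 2: subtract the max before exponentiating
    attention = attention - attention.amax(dim=2, keepdim=True)
    attention = attention.exp()
    return attention / attention.sum(dim=2, keepdim=True)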
import torch import torch.nn as nn from torch.nn import functional as F class GATMutiHeadAttLayer(nn.Module): def __init__(self, in_features, out_features, heads, dropout=0.4, alpha =0.2, concat=True): super(GATMutiHeadAttLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.heads = heads self.alpha = alpha self.concat = concat self.gain = nn.init.calculate_gain('leaky_relu', self.alpha) self.leakyrelu = nn.LeakyReLU(self.alpha) self.W = nn.Parameter(torch.zeros(size=(heads, in_features, out_features))) nn.init.xavier_uniform_(self.W.data, self.gain) self.a_1 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_1.data, self.gain) self.a_2 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_2.data, self.gain) def forward(self, input_seq, adj): if len(input_seq.size()) == 2: input_seq = torch.unsqueeze(input_seq, 0) adj = torch.unsqueeze(adj, 0) input_seq = torch.unsqueeze(input_seq, 1) adj = torch.unsqueeze(adj, 1) in_size = input_seq.size() nbatchs = in_size[0] slen = in_size[2] h = torch.matmul(input_seq, self.W) f_1 = torch.matmul(h, self.a_1) f_2 = torch.matmul(h, self.a_2) e = f_1.expand(nbatchs, self.heads, slen, slen) + f_2.expand(nbatchs, self.heads, slen, slen).transpose(2, 3) e = self.leakyrelu(e) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj.expand(nbatchs, self.heads, slen, slen) > 0, e, zero_vec) attention = F.softmax(attention, dim=2) attention = F.dropout(attention, self.dropout, training=self.training) node_out = torch.matmul(attention, h) if self.concat: node_out = node_out.transpose(1, 2).contiguous().view(nbatchs, slen, -1) node_out = F.elu(node_out) else: node_out = node_out.mean(1) return node_out.squeeze() class GAT(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(GAT, self).__init__() self.dropout = dropout self.MHAlayer1 = GATMutiHeadAttLayer(nfeat, nhid, nheads, dropout, alpha) self.out_att = GATMutiHeadAttLayer(nhid * nheads, nclass, 1, dropout, alpha, concat=False) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = self.MHAlayer1(x, adj) x = F.dropout(x, self.dropout, training=self.training) x = F.elu(self.out_att(x, adj)) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
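# ---------------------------------------------------------------------------
# Usage sketch (hypothetical; the variable names below are ours): wiring the
# eager model to the harness helpers above. get_init_inputs() returns the
# positional and keyword arguments for GAT, get_inputs() returns a random
# feature tensor and adjacency tensor, and eval() disables the dropout layers
# so the forward pass is deterministic.
args, kwargs = get_init_inputs()
model = GAT(*args, **kwargs).eval()
x, adj = get_inputs()
out = model(x, adj)  # log-probabilities, shape (4, 4, 4)
# exp(log_softmax) sums to 1 along the class dimension (dim=1)
assert torch.allclose(out.exp().sum(dim=1), torch.ones(4, 4), atol=1e-5)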
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 16 * x3), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + (4 + x0 + 16 * x3), xmask).to(tl.int1) tmp15 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (8 + x0 + 16 * x3), xmask).to(tl.int1) tmp24 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr1 + (12 + x0 + 16 * x3), xmask).to(tl.int1) tmp33 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) 
tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp12 > tmp1 tmp16 = tmp15 + tmp5 tmp17 = tmp16 * tmp7 tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.where(tmp13, tmp18, tmp10) tmp20 = triton_helpers.maximum(tmp11, tmp19) tmp22 = tmp21 > tmp1 tmp25 = tmp24 + tmp5 tmp26 = tmp25 * tmp7 tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp10) tmp29 = triton_helpers.maximum(tmp20, tmp28) tmp31 = tmp30 > tmp1 tmp34 = tmp33 + tmp5 tmp35 = tmp34 * tmp7 tmp36 = tl.where(tmp32, tmp34, tmp35) tmp37 = tl.where(tmp31, tmp36, tmp10) tmp38 = triton_helpers.maximum(tmp29, tmp37) tl.store(out_ptr0 + x4, tmp38, xmask) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 x0 = xindex % 4 x7 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + x5, xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + 4 * x7), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr4 + (x0 + 4 * x7), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp11 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (x0 // 4) + 64 * x2 + x0 % 4), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask) tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp14 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask).to(tl.int1) tmp15 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp23 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask).to(tl.int1) tmp24 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp32 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask).to(tl.int1) tmp33 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp12 > tmp1 tmp16 = tmp15 + tmp5 tmp17 = tmp16 * tmp7 tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.where(tmp13, tmp18, tmp10) tmp20 = triton_helpers.maximum(tmp11, tmp19) tmp22 = tmp21 > tmp1 tmp25 = tmp24 + tmp5 tmp26 = tmp25 * tmp7 tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp10) tmp29 = triton_helpers.maximum(tmp20, tmp28) tmp31 = tmp30 > tmp1 tmp34 = tmp33 + tmp5 tmp35 = tmp34 * tmp7 tmp36 = tl.where(tmp32, tmp34, tmp35) tmp37 = tl.where(tmp31, tmp36, tmp10) tmp38 = triton_helpers.maximum(tmp29, tmp37) tl.store(out_ptr0 + x2, tmp38, xmask) @triton.jit def triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr1 + x3, xmask).to(tl.int1) tmp4 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr4 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl.where(tmp3, tmp6, tmp8) tmp10 = -8999999815811072.0 tmp11 = tl.where(tmp2, tmp9, tmp10) tmp13 = tmp11 - tmp12 tmp14 = tl_math.exp(tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_elu_mean_squeeze_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp25 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = tmp2 * tmp1 tmp6 = libdevice.expm1(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp5, tmp7) tmp10 = tmp9 / tmp1 tmp11 = tmp10 > tmp3 tmp12 = tmp10 * tmp1 tmp13 = libdevice.expm1(tmp12) tmp14 = tmp13 * tmp1 tmp15 = tl.where(tmp11, tmp12, tmp14) tmp16 = triton_helpers.maximum(tmp8, tmp15) tmp18 = tmp17 / tmp1 tmp19 = tmp18 > tmp3 tmp20 = tmp18 * tmp1 tmp21 = libdevice.expm1(tmp20) tmp22 = tmp21 * tmp1 tmp23 = tl.where(tmp19, tmp20, tmp22) tmp24 = triton_helpers.maximum(tmp16, tmp23) tmp26 = tmp25 / tmp1 tmp27 = tmp26 > tmp3 tmp28 = tmp26 * tmp1 tmp29 = libdevice.expm1(tmp28) tmp30 = tmp29 * tmp1 tmp31 = tl.where(tmp27, tmp28, tmp30) tmp32 = triton_helpers.maximum(tmp24, tmp31) tmp33 = tmp8 - tmp32 tmp34 = tl_math.exp(tmp33) tmp35 = tmp15 - tmp32 tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tmp38 = tmp23 - tmp32 tmp39 = tl_math.exp(tmp38) tmp40 = tmp37 + tmp39 tmp41 = tmp31 - tmp32 tmp42 = tl_math.exp(tmp41) tmp43 = tmp40 + tmp42 tl.store(out_ptr0 + x2, tmp32, xmask) tl.store(out_ptr1 + x2, tmp43, xmask) @triton.jit def triton_poi_fused__log_softmax_elu_mean_squeeze_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp9 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = tmp2 * tmp1 tmp6 = libdevice.expm1(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp5, tmp7) tmp10 = tmp8 - tmp9 tmp12 = tl_math.log(tmp11) tmp13 = tmp10 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (1, 16, 4), (64, 4, 1)) assert_size_stride(primals_7, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (1, 4, 1), (4, 1, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_3, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](primals_4, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](primals_5, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_3[grid(256)](buf4, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_4[grid(64)]( primals_2, buf7, buf4, buf6, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf1 del buf1 triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_5[grid(256)]( primals_2, buf7, buf4, buf6, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_6[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0) del buf9 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf2, out=buf11) buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_elu_7[grid(256)](buf11, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0) del buf8 extern_kernels.bmm(buf12, reinterpret_tensor(primals_6, (4, 16, 4), (0, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(primals_7, (4, 4, 1), (0, 1, 0), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(primals_8, (4, 4, 1), (0, 1, 0), 0), out=buf15) buf16 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_8[grid(64)](buf14, buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_9[grid(16)]( primals_2, buf16, buf14, buf15, buf17, 16, XBLOCK=16, num_warps =1, num_stages=1) buf18 = reinterpret_tensor(buf6, (4, 1, 4, 4), (16, 64, 4, 1), 0) del buf6 triton_poi_fused__softmax_add_gt_leaky_relu_mul_where_10[grid(64)]( primals_2, buf16, buf14, buf15, buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 buf19 = reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf4 
triton_poi_fused__softmax_11[grid(64)](buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf20 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 extern_kernels.bmm(reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0), buf13, out=buf20) buf21 = reinterpret_tensor(buf17, (4, 1, 4), (4, 16, 1), 0) del buf17 buf22 = reinterpret_tensor(buf15, (4, 1, 4), (4, 16, 1), 0) del buf15 triton_poi_fused__log_softmax_elu_mean_squeeze_12[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_elu_mean_squeeze_13[grid(64)](buf20, buf21, buf22, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 return (buf23, primals_2, primals_6, primals_7, primals_8, buf7, buf10, buf11, buf16, buf19, buf20, buf23, reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf12, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0)) class GATMutiHeadAttLayer(nn.Module): def __init__(self, in_features, out_features, heads, dropout=0.4, alpha =0.2, concat=True): super(GATMutiHeadAttLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.heads = heads self.alpha = alpha self.concat = concat self.gain = nn.init.calculate_gain('leaky_relu', self.alpha) self.leakyrelu = nn.LeakyReLU(self.alpha) self.W = nn.Parameter(torch.zeros(size=(heads, in_features, out_features))) nn.init.xavier_uniform_(self.W.data, self.gain) self.a_1 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_1.data, self.gain) self.a_2 = nn.Parameter(torch.zeros(size=(heads, out_features, 1))) nn.init.xavier_uniform_(self.a_2.data, self.gain) def forward(self, input_seq, adj): if len(input_seq.size()) == 2: input_seq = torch.unsqueeze(input_seq, 0) adj = torch.unsqueeze(adj, 0) input_seq = torch.unsqueeze(input_seq, 1) adj = torch.unsqueeze(adj, 1) in_size = input_seq.size() nbatchs = in_size[0] slen = in_size[2] h = torch.matmul(input_seq, self.W) f_1 = torch.matmul(h, self.a_1) f_2 = torch.matmul(h, self.a_2) e = f_1.expand(nbatchs, self.heads, slen, slen) + f_2.expand(nbatchs, self.heads, slen, slen).transpose(2, 3) e = self.leakyrelu(e) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj.expand(nbatchs, self.heads, slen, slen) > 0, e, zero_vec) attention = F.softmax(attention, dim=2) attention = F.dropout(attention, self.dropout, training=self.training) node_out = torch.matmul(attention, h) if self.concat: node_out = node_out.transpose(1, 2).contiguous().view(nbatchs, slen, -1) node_out = F.elu(node_out) else: node_out = node_out.mean(1) return node_out.squeeze() class GATNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(GATNew, self).__init__() self.dropout = dropout self.MHAlayer1 = GATMutiHeadAttLayer(nfeat, nhid, nheads, dropout, alpha) self.out_att = GATMutiHeadAttLayer(nhid * nheads, nclass, 1, dropout, alpha, concat=False) def forward(self, input_0, input_1): primals_1 = self.MHAlayer1.W primals_4 = self.MHAlayer1.a_1 primals_5 = self.MHAlayer1.a_2 primals_6 = self.out_att.W primals_7 = self.out_att.a_1 primals_8 = self.out_att.a_2 primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
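# --- Hedged usage sketch (added for illustration; not part of the source
# repo). Shapes mirror the assert_size_stride guards in `call` (batch 4,
# 4 nodes, 4 features, 4 heads); a CUDA device is required because `call`
# pins cuda:0, and dropout is not applied in this compiled path.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = GATNew(nfeat=4, nhid=4, nclass=4, dropout=0.4, alpha=0.2,
        nheads=4).cuda()
    feats = torch.rand(4, 4, 4, device='cuda')  # (batch, nodes, features)
    adj = torch.ones(4, 4, 4, device='cuda')    # dense adjacency per batch
    out = model(feats, adj)                     # log-softmax class scores
    print(out.shape)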
gitubee/pyGAT
GAT
false
10164
[ "MIT" ]
0
bc4cc2b6565b7f2ad99daf88013207f64991c273
https://github.com/gitubee/pyGAT/tree/bc4cc2b6565b7f2ad99daf88013207f64991c273
DistillKL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/mc/cmc44gqwlbgitm3uqkuiwz6fe3jirwculg7zmyndeuqzyyqzyok7.py # Topologically Sorted Source Nodes: [p_t], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_t => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xg/cxg6geasclvgycjnyaybokxud5rdp2fe6eropfaplher4ysvlw4g.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), 
xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bn/cbnq3b4f3wkxjyazqbgxcpo4q6wewzigsbhu3mgms7fjutjswpex.py # Topologically Sorted Source Nodes: [p_t, kl_div, p_s, mul, loss], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # kl_div => eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # loss => div_3 # mul => mul_2 # p_s => exp, log, sub_1, sum_1 # p_t => div_2, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 16), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 4), kwargs = {}) triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2 = 
async_compile.triton('triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (r3), None) tmp18 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 16.0 tmp37 = tmp35 * tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, 
tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_t], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [p_t, kl_div, p_s, mul, loss], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
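# --- Added note (hedged): auditing the constants baked into the kernels
# above. With T = 4 and batch = 4, the wrapper-level
# `kl_div(...) * T ** 2 / batch` appears in the reduction kernel as
# `tmp37 = tmp35 * 16.0` followed by `tmp39 = tmp37 * 0.25`, and the
# pointwise kernels fold `1 / T` into the literal `0.25`. One-line check:
assert 16.0 * 0.25 == 4.0 ** 2 / 4.0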
import torch from torch import nn import torch.nn.functional as F class DistillKL(nn.Module): """Distilling the Knowledge in a Neural Network""" def __init__(self, T): super(DistillKL, self).__init__() self.T = T def forward(self, y_s, y_t): p_s = F.log_softmax(y_s / self.T, dim=1) p_t = F.softmax(y_t / self.T, dim=1) loss = F.kl_div(p_s, p_t, size_average=False ) * self.T ** 2 / y_s.shape[0] return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'T': 4}]
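# --- Hedged usage sketch (added for illustration; not from the source
# repo). Runs the eager reference on CPU. Note that `size_average=False`
# is the legacy spelling of `reduction='sum'` and emits a deprecation
# warning on recent PyTorch releases.
if __name__ == '__main__':
    torch.manual_seed(0)
    criterion = DistillKL(T=4)
    y_s, y_t = get_inputs()
    print(criterion(y_s, y_t))  # scalar KD loss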
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 
tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 16.0 tmp37 = tmp35 * tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class DistillKLNew(nn.Module): """Distilling the Knowledge in a Neural Network""" def __init__(self, T): super(DistillKLNew, self).__init__() self.T = T def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
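# --- Hedged equivalence check (added; assumes a CUDA device, since `call`
# pins cuda:0). The stored T is unused by the fused kernels, which hard-code
# T = 4 through the 0.25 scale, so only T=4 reproduces the eager DistillKL.
if __name__ == '__main__':
    torch.manual_seed(0)
    y_s = torch.rand(4, 4, 4, 4, device='cuda')
    y_t = torch.rand(4, 4, 4, 4, device='cuda')
    print(DistillKLNew(T=4)(y_s, y_t))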
bobo0810/RepDistiller
DistillKL
false
10165
[ "BSD-2-Clause" ]
0
0a4cea2142221b9b31c8e995920273f5619b37f8
https://github.com/bobo0810/RepDistiller/tree/0a4cea2142221b9b31c8e995920273f5619b37f8
PKT
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/us/cuss63los66cmuzjpei2dpgpl3bbx6cgwht7porec7hj3lclnazh.py # Topologically Sorted Source Nodes: [pow_2, sum_2, target_net_norm, add_1, target_net, setitem_1], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # add_1 => add_1 # pow_2 => pow_2 # setitem_1 => full_default_1, index_put_1 # sum_2 => sum_2 # target_net => div_1 # target_net_norm => sqrt_1 # Graph fragment: # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1], True), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-07), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %add_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%div_1, [%ne_1], %full_default_1), kwargs = {}) triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp16 = tmp15 != tmp15 tmp17 = 0.0 tmp18 = tl.where(tmp16, tmp17, tmp15) tl.store(in_out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yp/cypfp6dwhkdjjlgaluiwgyjiyan74ahdhbisu33n2ow6e3gmrfz3.py # Topologically Sorted Source Nodes: [add_3, target_similarity_1, sum_4, target_similarity_2, add_4, add_2, model_similarity_1, sum_3, model_similarity_2, add_5, truediv_6, log, mul, loss], Original ATen: [aten.add, aten.div, aten.sum, aten.log, aten.mul, aten.mean] # Source node to ATen node mapping: # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # log => log # loss => mean # model_similarity_1 => div_2 # model_similarity_2 => div_4 # mul => mul # sum_3 => sum_3 # sum_4 => sum_4 # target_similarity_1 => div_3 # target_similarity_2 => div_5 # truediv_6 => div_6 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, 1.0), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 2.0), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_3, [1], True), kwargs = {}) # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, %sum_4), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_5, 1e-07), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, 1.0), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 2.0), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_2, [1], True), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, %sum_3), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_4, 1e-07), kwargs 
= {}) # %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_5), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_6,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_5, %log), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_add_div_log_mean_mul_sum_1 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), None) tmp5 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (r2), None) tmp26 = tl.load(in_ptr1 + (4*r1), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp6 = tmp5 + tmp1 tmp7 = tmp6 * tmp3 tmp9 = tmp8 + tmp1 tmp10 = tmp9 * tmp3 tmp11 = tmp7 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp13 * tmp3 tmp15 = tmp11 + tmp14 tmp17 = tmp16 + tmp1 tmp18 = tmp17 * tmp3 tmp19 = tmp15 + tmp18 tmp20 = tmp4 / tmp19 tmp21 = 1e-07 tmp22 = tmp20 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp24 * tmp3 tmp27 = tmp26 + tmp1 tmp28 = 
tmp27 * tmp3 tmp30 = tmp29 + tmp1 tmp31 = tmp30 * tmp3 tmp32 = tmp28 + tmp31 tmp34 = tmp33 + tmp1 tmp35 = tmp34 * tmp3 tmp36 = tmp32 + tmp35 tmp38 = tmp37 + tmp1 tmp39 = tmp38 * tmp3 tmp40 = tmp36 + tmp39 tmp41 = tmp25 / tmp40 tmp42 = tmp41 + tmp21 tmp43 = tmp22 / tmp42 tmp44 = tl_math.log(tmp43) tmp45 = tmp20 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = tl.sum(tmp46, 1)[:, None] tmp49 = 16.0 tmp50 = tmp48 / tmp49 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp50, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [pow_2, sum_2, target_net_norm, add_1, target_net, setitem_1], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.lift_fresh, aten.index_put] stream0 = get_raw_stream(0) triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0.run(buf1, arg1_1, 16, grid=grid(16), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [target_similarity], Original ATen: [aten.mm] extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf4 = buf1; del buf1 # reuse buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [pow_1, sum_1, output_net_norm, add, output_net, setitem], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.lift_fresh, aten.index_put] triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0.run(buf5, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [model_similarity], Original ATen: [aten.mm] extern_kernels.mm(buf5, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6) del buf5 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [add_3, target_similarity_1, sum_4, target_similarity_2, add_4, add_2, model_similarity_1, sum_3, model_similarity_2, add_5, truediv_6, log, mul, loss], Original ATen: [aten.add, aten.div, aten.sum, aten.log, aten.mul, aten.mean] triton_per_fused_add_div_log_mean_mul_sum_1.run(buf8, buf2, buf6, 1, 16, grid=grid(1), stream=stream0) del buf2 del buf6 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
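# --- Added note (hedged): the NaN guard in the fused normalization kernel.
# The eager line `x[x != x] = 0` relies on NaN being the only value unequal
# to itself; the kernel reproduces it branch-free as
# `tl.where(tmp15 != tmp15, 0.0, tmp15)`. Minimal eager demonstration:
if __name__ == '__main__':
    _x = torch.tensor([1.0, float('nan')])
    _x[_x != _x] = 0.0
    assert _x[1].item() == 0.0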
import torch from torch import nn class PKT(nn.Module): """Probabilistic Knowledge Transfer for deep representation learning Code from author: https://github.com/passalis/probabilistic_kt""" def __init__(self): super(PKT, self).__init__() def forward(self, f_s, f_t): return self.cosine_similarity_loss(f_s, f_t) @staticmethod def cosine_similarity_loss(output_net, target_net, eps=1e-07): output_net_norm = torch.sqrt(torch.sum(output_net ** 2, dim=1, keepdim=True)) output_net = output_net / (output_net_norm + eps) output_net[output_net != output_net] = 0 target_net_norm = torch.sqrt(torch.sum(target_net ** 2, dim=1, keepdim=True)) target_net = target_net / (target_net_norm + eps) target_net[target_net != target_net] = 0 model_similarity = torch.mm(output_net, output_net.transpose(0, 1)) target_similarity = torch.mm(target_net, target_net.transpose(0, 1)) model_similarity = (model_similarity + 1.0) / 2.0 target_similarity = (target_similarity + 1.0) / 2.0 model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True) target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True) loss = torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps))) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
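# --- Hedged usage sketch (added for illustration; not from the source
# repo). The loss is a KL-style divergence between the row-normalized
# cosine-similarity matrices of the two feature batches; runs on CPU.
if __name__ == '__main__':
    torch.manual_seed(0)
    f_s, f_t = get_inputs()
    print(PKT()(f_s, f_t))  # scalar transfer loss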
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp16 = tmp15 != tmp15 tmp17 = 0.0 tmp18 = tl.where(tmp16, tmp17, tmp15) tl.store(in_out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_per_fused_add_div_log_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp5 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + r2, None) tmp26 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp6 = tmp5 + tmp1 tmp7 = tmp6 * tmp3 tmp9 = tmp8 + tmp1 tmp10 = tmp9 * tmp3 tmp11 = tmp7 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp13 * tmp3 tmp15 = tmp11 + tmp14 tmp17 = tmp16 + tmp1 tmp18 = tmp17 * tmp3 tmp19 = tmp15 + tmp18 tmp20 = tmp4 / tmp19 tmp21 = 1e-07 tmp22 = tmp20 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp24 * tmp3 tmp27 = tmp26 + tmp1 tmp28 = tmp27 * tmp3 tmp30 = tmp29 + tmp1 tmp31 = tmp30 * tmp3 tmp32 = tmp28 + tmp31 tmp34 = tmp33 + tmp1 tmp35 = tmp34 * tmp3 tmp36 = tmp32 + tmp35 tmp38 = tmp37 + tmp1 tmp39 = tmp38 * tmp3 tmp40 = tmp36 + tmp39 tmp41 = tmp25 / tmp40 tmp42 = tmp41 + tmp21 tmp43 = tmp22 / tmp42 tmp44 = tl_math.log(tmp43) tmp45 = tmp20 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = tl.sum(tmp46, 1)[:, None] tmp49 = 16.0 tmp50 = tmp48 / tmp49 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp50, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), 
(4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0[grid(16)]( buf1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf4 = buf1 del buf1 buf5 = buf4 del buf4 triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0[grid(16)]( buf5, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6) del buf5 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7 del buf7 triton_per_fused_add_div_log_mean_mul_sum_1[grid(1)](buf8, buf2, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf6 return buf8, class PKTNew(nn.Module): """Probabilistic Knowledge Transfer for deep representation learning Code from author: https://github.com/passalis/probabilistic_kt""" def __init__(self): super(PKTNew, self).__init__() @staticmethod def cosine_similarity_loss(output_net, target_net, eps=1e-07): output_net_norm = torch.sqrt(torch.sum(output_net ** 2, dim=1, keepdim=True)) output_net = output_net / (output_net_norm + eps) output_net[output_net != output_net] = 0 target_net_norm = torch.sqrt(torch.sum(target_net ** 2, dim=1, keepdim=True)) target_net = target_net / (target_net_norm + eps) target_net[target_net != target_net] = 0 model_similarity = torch.mm(output_net, output_net.transpose(0, 1)) target_similarity = torch.mm(target_net, target_net.transpose(0, 1)) model_similarity = (model_similarity + 1.0) / 2.0 target_similarity = (target_similarity + 1.0) / 2.0 model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True) target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True) loss = torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps))) return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
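# --- Hedged equivalence check (added; assumes a CUDA device). The compiled
# path only accepts the (4, 4) shape asserted in `call`; other sizes would
# need recompilation.
if __name__ == '__main__':
    torch.manual_seed(0)
    f_s = torch.rand(4, 4, device='cuda')
    f_t = torch.rand(4, 4, device='cuda')
    print(PKTNew()(f_s, f_t))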
bobo0810/RepDistiller
PKT
false
10166
[ "BSD-2-Clause" ]
0
0a4cea2142221b9b31c8e995920273f5619b37f8
https://github.com/bobo0810/RepDistiller/tree/0a4cea2142221b9b31c8e995920273f5619b37f8
GeM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/w7/cw76edgx2byvy63fwff7anzlambkufz45h3okmkxmcgppfib77jc.py # Topologically Sorted Source Nodes: [clamp, pow_1], Original ATen: [aten.clamp, aten.pow] # Source node to ATen node mapping: # clamp => clamp_min # pow_1 => pow_1 # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_2, 1e-06), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min, %primals_1), kwargs = {}) triton_poi_fused_clamp_pow_0 = async_compile.triton('triton_poi_fused_clamp_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp5 = libdevice.pow(tmp2, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bw/cbwgbppqaap3oywo4gn6wujtg6clpxlchs4m6kjxievrbuoqr6wh.py # Topologically Sorted Source Nodes: [avg_pool2d, truediv, pow_2], Original ATen: [aten.avg_pool2d, aten.reciprocal, aten.mul, aten.pow] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # pow_2 => pow_2 # truediv => mul, reciprocal # Graph fragment: # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [4, 4]), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%avg_pool2d, %mul), kwargs = {}) triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1 = async_compile.triton('triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = 
tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (0)) tmp34 = tl.broadcast_to(tmp33, [XBLOCK]) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tmp35 = tl.full([1], 1, tl.int32) tmp36 = tmp35 / tmp34 tmp37 = 1.0 tmp38 = tmp36 * tmp37 tmp39 = libdevice.pow(tmp32, tmp38) tl.store(out_ptr0 + (x0), tmp32, xmask) tl.store(out_ptr1 + (x0), tmp39, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp, pow_1], Original ATen: [aten.clamp, aten.pow] stream0 = get_raw_stream(0) triton_poi_fused_clamp_pow_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d, truediv, pow_2], Original ATen: [aten.avg_pool2d, aten.reciprocal, aten.mul, aten.pow] triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1.run(buf0, primals_1, buf1, buf2, 16, grid=grid(16), stream=stream0) return (buf2, primals_1, primals_2, buf0, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter


def gem(x, p=3, eps=1e-06):
    return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))
        ).pow(1.0 / p)


class GeM(nn.Module):

    def __init__(self, p=3, eps=1e-06, p_trainable=True):
        super(GeM, self).__init__()
        if p_trainable:
            self.p = Parameter(torch.ones(1) * p)
        else:
            self.p = p
        self.eps = eps

    def forward(self, x):
        return gem(x, p=self.p, eps=self.eps)

    def __repr__(self):
        return self.__class__.__name__ + '(p={:.4f}, eps={})'.format(self.p.data.tolist()[0], self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
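Usage sketch (added for illustration; it assumes the GeM definitions above are in scope): the module collapses each feature map to 1x1, and at p = 1 generalized-mean pooling reduces to plain average pooling.

import torch
import torch.nn.functional as F

pool = GeM(p=3)                   # trainable p, initialised to 3
x = torch.rand([4, 4, 4, 4])      # matches get_inputs() above
print(pool, pool(x).shape)        # GeM(p=3.0000, eps=1e-06) torch.Size([4, 4, 1, 1])

# At p = 1, GeM reduces to plain average pooling over the full spatial extent.
pool_p1 = GeM(p=1)
assert torch.allclose(pool_p1(x), F.avg_pool2d(x, (4, 4)), atol=1e-06)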
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import functional as F from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp5 = libdevice.pow(tmp2, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr1 + 0) tmp34 = tl.broadcast_to(tmp33, [XBLOCK]) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tmp35 = tl.full([1], 1, tl.int32) tmp36 = tmp35 / tmp34 tmp37 = 1.0 tmp38 = tmp36 * tmp37 tmp39 = libdevice.pow(tmp32, tmp38) tl.store(out_ptr0 + x0, tmp32, xmask) tl.store(out_ptr1 + x0, tmp39, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_pow_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = 
empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_avg_pool2d_mul_pow_reciprocal_1[grid(16)](buf0, primals_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf2, primals_1, primals_2, buf0, buf1, buf2 def gem(x, p=3, eps=1e-06): return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow( 1.0 / p) class GeMNew(nn.Module): def __init__(self, p=3, eps=1e-06, p_trainable=True): super(GeMNew, self).__init__() if p_trainable: self.p = Parameter(torch.ones(1) * p) else: self.p = p self.eps = eps def __repr__(self): return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self. p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')' def forward(self, input_0): primals_1 = self.p primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
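Parity sketch (added, not part of the generated module): appended at the bottom of the compiled file above and run on a CUDA machine, it checks the Triton-backed GeMNew against the eager gem helper defined alongside it.

if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.rand([4, 4, 4, 4], device='cuda')
    m = GeMNew(p=3).cuda()
    # Both paths compute clamp -> pow(p) -> avg_pool2d -> pow(1/p).
    assert torch.allclose(m(x), gem(x, p=m.p, eps=m.eps), atol=1e-05)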
h8c2/kaggle-landmark-recognition-2020-1st-place
GeM
false
10167
[ "MIT" ]
0
3285b6c9548d100b14800ea3927f5974b25facd9
https://github.com/h8c2/kaggle-landmark-recognition-2020-1st-place/tree/3285b6c9548d100b14800ea3927f5974b25facd9
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/jw/cjwwa4uchk246nlrjndt3n65ojf447r3xpsci6idv5lw24heuhdo.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %repeat], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = (xindex // 8) x2 = (xindex // 32) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x3) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/gd/cgdtd7uw2iemby2kfb22fx3vkhdbrpyx2y2l6nq45fmox3ad7stv.py # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qs/cqsyda2m63ct5ijcfgcipyyfn273chi5d3kmpjuf5asa7h4wdpdv.py # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [o], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0), primals_1, out=buf6) del buf5 return (reinterpret_tensor(buf6, (4, 4), (4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf3, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class Attention(nn.Module):
    """
    Applies an attention mechanism to the output features from the decoder.
    """

    def __init__(self, dim):
        super(Attention, self).__init__()
        self.dim = dim
        self.linear1 = nn.Linear(dim * 2, dim)
        self.linear2 = nn.Linear(dim, 1, bias=False)

    def _init_hidden(self):
        nn.init.xavier_normal_(self.linear1.weight)
        nn.init.xavier_normal_(self.linear2.weight)

    def forward(self, hidden_state, encoder_outputs):
        """
        Arguments:
            hidden_state {Variable} -- batch_size x dim
            encoder_outputs {Variable} -- batch_size x seq_len x dim

        Returns:
            Variable -- context vector of size batch_size x dim
        """
        batch_size, seq_len, _ = encoder_outputs.size()
        hidden_state = hidden_state.unsqueeze(1).repeat(1, seq_len, 1)
        inputs = torch.cat((encoder_outputs, hidden_state), 2).view(-1,
            self.dim * 2)
        o = self.linear2(torch.tanh(self.linear1(inputs)))
        e = o.view(batch_size, seq_len)
        alpha = F.softmax(e, dim=1)
        context = torch.bmm(alpha.unsqueeze(1), encoder_outputs).squeeze(1)
        return context


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
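Usage sketch (added; the names attn, h, and enc are illustrative, with shapes taken from get_inputs() above):

import torch

attn = Attention(dim=4)
h = torch.rand(4, 4)       # hidden_state: batch_size x dim
enc = torch.rand(4, 4, 4)  # encoder_outputs: batch_size x seq_len x dim
print(attn(h, enc).shape)  # context vector: torch.Size([4, 4])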
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = xindex // 8 x2 = xindex // 32 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0 ), primals_1, out=buf6) del buf5 return reinterpret_tensor(buf6, (4, 4), (4, 1), 0), reinterpret_tensor(buf0 , (16, 8), (8, 1), 0), buf2, buf3, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), primals_5 class AttentionNew(nn.Module): """ Applies an attention mechanism on the output features from the decoder. """ def __init__(self, dim): super(AttentionNew, self).__init__() self.dim = dim self.linear1 = nn.Linear(dim * 2, dim) self.linear2 = nn.Linear(dim, 1, bias=False) def _init_hidden(self): nn.init.xavier_normal_(self.linear1.weight) nn.init.xavier_normal_(self.linear2.weight) def forward(self, input_0, input_1): primals_3 = self.linear1.weight primals_4 = self.linear1.bias primals_5 = self.linear2.weight primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
gluver/video-caption.pytorch
Attention
false
10168
[ "MIT" ]
0
15000246980e43f71a254ab3deeb91f0957309bb
https://github.com/gluver/video-caption.pytorch/tree/15000246980e43f71a254ab3deeb91f0957309bb
EnDown
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/th/cthqlniy4mbxjtqgoswgn6zqs5myflemsufxzkxf36iypdzc2sql.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] # Source node to ATen node mapping: # y => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 8) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 2, 2, 2), (32, 8, 4, 2, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 32, grid=grid(32), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 2, 2, 2), (8, 4, 2, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.optim


class EnDown(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(EnDown, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3,
            stride=2, padding=1)

    def forward(self, x):
        y = self.conv(x)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
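Shape-check sketch (added): recent PyTorch versions accept unbatched conv inputs, so the 4-D tensor from get_inputs() is treated as a single (C, D, H, W) volume, and the stride-2 convolution halves every spatial dimension.

import torch

down = EnDown(in_channels=4, out_channels=4)
x = torch.rand([4, 4, 4, 4])        # unbatched (C, D, H, W)
print(down(x).shape)                # torch.Size([4, 2, 2, 2])
xb = torch.rand([2, 4, 8, 8, 8])    # batched (N, C, D, H, W)
print(down(xb).shape)               # torch.Size([2, 4, 4, 4, 4])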
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 8
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
            4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2,
            2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 2, 2, 2), (32, 8, 4, 2, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(32)](buf1, primals_2, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_2
    return (reinterpret_tensor(buf1, (4, 2, 2, 2), (8, 4, 2, 1), 0),
        primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64,
        16, 4, 1), 0))


class EnDownNew(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(EnDownNew, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3,
            stride=2, padding=1)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
felixquinton1/TransBTS
EnDown
false
10169
[ "Apache-2.0" ]
0
6992c902413ba15f40ebfe9f6d5d0e3594051033
https://github.com/felixquinton1/TransBTS/tree/6992c902413ba15f40ebfe9f6d5d0e3594051033
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nh/cnh6iem5ybdb3twbxsrnswdgv7527qrbvucjhftq2p4nhirlo2lo.py # Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # distances => sum_1 # pow_1 => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yu/cyuyrkbzs3boq5rfip75kejn76bd4kpeieqcygt7f7quogzu4q5u.py # Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # losses => mul_3 # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_2 => pow_2 # relu => relu # sqrt => sqrt # sub_1 => sub_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %sum_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, -1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-09), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %sqrt), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {}) triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1 = async_compile.triton('triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r2), None) tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = -1.0 tmp4 = tmp0 * tmp3 tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = 1e-09 tmp8 = tmp1 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 4.0 tmp11 = tmp10 - tmp9 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tmp13 * tmp13 tmp15 = tmp6 * tmp14 tmp16 = tmp2 + tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 256.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_pow_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean] triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class ContrastiveLoss(nn.Module):
    """
    Contrastive loss.
    Takes embeddings of two samples and a target label: 1 if the samples
    are from the same class, 0 otherwise.
    """

    def __init__(self, margin):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        self.eps = 1e-09

    def forward(self, output1, output2, target, size_average=True):
        distances = (output2 - output1).pow(2).sum(1)
        losses = 0.5 * (target.float() * distances + (1 - target).float() *
            F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
        return losses.mean() if size_average else losses.sum()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'margin': 4}]
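Worked example (added; margin and embeddings are illustrative): for target 1 the loss is half the squared distance, for target 0 it is half the squared hinge on the margin.

import torch

criterion = ContrastiveLoss(margin=4)
a = torch.tensor([[1.0, 0.0]])
b = torch.tensor([[0.0, 0.0]])      # squared distance d^2 = 1

same = criterion(a, b, torch.tensor([1.0]))   # 0.5 * d^2            -> 0.5
diff = criterion(a, b, torch.tensor([0.0]))   # 0.5 * (margin - d)^2 -> 4.5
print(same.item(), diff.item())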
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = -1.0 tmp4 = tmp0 * tmp3 tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = 1e-09 tmp8 = tmp1 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 4.0 tmp11 = tmp10 - tmp9 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tmp13 * tmp13 tmp15 = tmp6 * tmp14 tmp16 = tmp2 + tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 256.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(nn.Module): """ Contrastive loss Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise """ def __init__(self, margin): super(ContrastiveLossNew, self).__init__() self.margin = margin self.eps = 1e-09 def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, 
arg2_1]) return output[0]
htn274/siamese-triplet
ContrastiveLoss
false
10170
[ "BSD-3-Clause" ]
0
d468fb939a7ab072a0e1cf1c507a87df1a901852
https://github.com/htn274/siamese-triplet/tree/d468fb939a7ab072a0e1cf1c507a87df1a901852
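Side note (not part of the record above): the fused reduction triton_per_fused_add_mean_mul_pow_relu_rsub_1 bakes margin = 4.0 and eps = 1e-09 in as compile-time constants. The following is a minimal eager-mode sketch of the same math, assuming the (4, 4, 4, 4) shapes from the assert_size_stride guards in call(); it is re-derived from the kernel, not taken from the repository's own reference code.

import torch

def contrastive_reference(x1, x2, target, margin=4.0, eps=1e-09):
    # Squared distance summed over dim 1, as in triton_poi_fused_pow_sub_sum_0
    d2 = (x1 - x2).pow(2).sum(1)
    # 0.5 * (y * d2 + (1 - y) * relu(margin - sqrt(d2 + eps))**2), then a global mean
    hinge = torch.relu(margin - (d2 + eps).sqrt())
    return (0.5 * (target * d2 + (1.0 - target) * hinge.pow(2))).mean()

x1, x2, y = (torch.rand(4, 4, 4, 4) for _ in range(3))
print(contrastive_reference(x1, x2, y))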
HuberLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fa/cfa6bszpns5yhug32unm5jaaxp7nppo2yfrmvmrfxoj6vtium6fq.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.smooth_l1_loss] # Source node to ATen node mapping: # loss => abs_1, div, lt, mean, mul, pow_1, sub, sub_1, where # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) triton_per_fused_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_smooth_l1_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.smooth_l1_loss] stream0 = get_raw_stream(0) triton_per_fused_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class HuberLoss(nn.Module): def __init__(self): super().__init__() self.loss = nn.SmoothL1Loss() def forward(self, logits, labels): loss = self.loss(logits, labels) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HuberLossNew(nn.Module): def __init__(self): super().__init__() self.loss = nn.SmoothL1Loss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hslrock/Reinforcement-Learning-Implementation
HuberLoss
false
10171
[ "MIT" ]
0
31db7e31c92f8e01609bf51d3f8f22211ec0fd5d
https://github.com/hslrock/Reinforcement-Learning-Implementation/tree/31db7e31c92f8e01609bf51d3f8f22211ec0fd5d
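For illustration only: nn.SmoothL1Loss with its default beta = 1.0 is exactly the branch where(|d| < 1, 0.5*d*d, |d| - 0.5) that the fused kernel evaluates before averaging over all 256 elements. A minimal CPU check of that equivalence:

import torch
import torch.nn as nn

logits = torch.rand(4, 4, 4, 4)
labels = torch.rand(4, 4, 4, 4)

# Eager reference for the kernel's element-wise branch plus mean reduction
d = (logits - labels).abs()
manual = torch.where(d < 1.0, 0.5 * d * d, d - 0.5).mean()

assert torch.allclose(nn.SmoothL1Loss()(logits, labels), manual)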
CoorsNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/se/csev7iv7qxmhng54zan3hgll7djidvorwvoldcetpd626rrdz2a3.py # Topologically Sorted Source Nodes: [norm, clamp, normed_coors, mul], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul] # Source node to ATen node mapping: # clamp => clamp_min # mul => mul # norm => pow_1, pow_2, sum_1 # normed_coors => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (0)) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp15 * tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, clamp, normed_coors, mul], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class CoorsNorm(nn.Module): def __init__(self, eps=1e-08, scale_init=1.0): super().__init__() self.eps = eps scale = torch.zeros(1).fill_(scale_init) self.scale = nn.Parameter(scale) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) return normed_coors * self.scale def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp15 * tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)]( primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class CoorsNormNew(nn.Module): def __init__(self, eps=1e-08, scale_init=1.0): super().__init__() self.eps = eps scale = torch.zeros(1).fill_(scale_init) self.scale = nn.Parameter(scale) def forward(self, input_0): primals_2 = self.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
hypnopump/En-transformer
CoorsNorm
false
10172
[ "MIT" ]
0
b52f0e5d79a886512f9d438de345fc8a9eae6420
https://github.com/hypnopump/En-transformer/tree/b52f0e5d79a886512f9d438de345fc8a9eae6420
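A short usage sketch (illustrative, using the record's own defaults): CoorsNorm normalises each last-dim vector, guards the division with clamp(min=eps), and multiplies by the learned scalar, which is the eager view of the single fused kernel above.

import torch

coors = torch.rand(4, 4, 4, 4)
scale = torch.ones(1)  # stands in for the nn.Parameter initialised to scale_init

norm = coors.norm(dim=-1, keepdim=True)      # sqrt of the 4-term sum in the kernel
out = coors / norm.clamp(min=1e-08) * scale  # the fused clamp_min + div + mul

print(out.norm(dim=-1).flatten()[:4])        # each vector now has norm ~= scale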
InitConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/rc/crcbvljomz4rhpd2d25pe5zidhn6m6igcqc5c2okorn3x27sgc57.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.bernoulli] # Source node to ATen node mapping: # y_1 => bernoulli # Graph fragment: # %bernoulli : [num_users=2] = call_function[target=torch.ops.aten.bernoulli.p](args = (%empty, 0.8), kwargs = {}) triton_poi_fused_bernoulli_0 = async_compile.triton('triton_poi_fused_bernoulli_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bernoulli_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bernoulli_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = float("nan") tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/q7/cq7iulydceioc26k22xuz3efipl37ginn756i4i4mi2luwjx6xxm.py # Topologically Sorted Source Nodes: [y_1], Original 
ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # y_1 => div, mul # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%bernoulli, 0.8), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %div), kwargs = {}) triton_poi_fused_div_mul_1 = async_compile.triton('triton_poi_fused_div_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 1.25 tmp5 = tmp3 * tmp4 tmp6 = tmp2 * tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.bernoulli] stream0 = get_raw_stream(0) triton_poi_fused_bernoulli_0.run(buf2, 16, grid=grid(16), stream=stream0) torch.ops.aten.bernoulli_.float(buf2, 0.8) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.div, aten.mul] 
triton_poi_fused_div_mul_1.run(buf4, primals_2, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 return (reinterpret_tensor(buf4, (16, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class InitConv(nn.Module): def __init__(self, in_channels=4, out_channels=16, dropout=0.2): super(InitConv, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1) self.dropout = dropout def forward(self, x): y = self.conv(x) y = F.dropout3d(y, self.dropout) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_bernoulli_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = float('nan') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 1.25 tmp5 = tmp3 * tmp4 tmp6 = tmp2 * tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_bernoulli_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) torch.ops.aten.bernoulli_.float(buf2, 0.8) buf4 = buf0 del buf0 triton_poi_fused_div_mul_1[grid(1024)](buf4, primals_2, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf4, (16, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 class InitConvNew(nn.Module): def __init__(self, in_channels=4, out_channels=16, dropout=0.2): super(InitConvNew, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1) self.dropout = dropout def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
felixquinton1/TransBTS
InitConv
false
10173
[ "Apache-2.0" ]
0
6992c902413ba15f40ebfe9f6d5d0e3594051033
https://github.com/felixquinton1/TransBTS/tree/6992c902413ba15f40ebfe9f6d5d0e3594051033
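One detail worth spelling out: F.dropout3d(y, 0.2) zeroes whole channels with probability p and rescales survivors by 1/(1 - p) = 1.25, which is the constant folded into triton_poi_fused_div_mul_1 (alongside the conv bias add). Below is a hand-rolled sketch of just the mask-and-rescale step, assuming the (1, 16, 4, 4, 4) conv output shape from call():

import torch

p = 0.2
y = torch.rand(1, 16, 4, 4, 4)

# One Bernoulli(1 - p) draw per channel, matching the (1, 16, 1, 1, 1) buffer
mask = torch.bernoulli(torch.full((1, 16, 1, 1, 1), 1.0 - p))
dropped = y * mask * (1.0 / (1.0 - p))  # the kernel's `tmp3 * 1.25` scaling

print(dropped[0, :, 0, 0, 0])  # surviving channels are scaled up by 1.25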
DQN_Simple
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/x3/cx3vj33z3tpfqf7y5eyq5z5jjba7dgt7ne7hw4gjsclwj5porq6x.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xg/cxglw5tnxo62srebich4jgqdzvs52nlik36j6floylioypgxguqt.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_1 => gt_1, mul_1, where_1 # Graph fragment: # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.01), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 128), (128, 1)) assert_size_stride(primals_5, (64, ), (1, )) 
assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 8192, grid=grid(8192), stream=stream0) del buf0 del primals_2 buf3 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 64), (1, 128), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 4096, grid=grid(4096), stream=stream0) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_7 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 128), (128, 1), 0), buf4, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch.autograd import Variable import torch.nn.functional as F import torch.nn as nn class NoisyLinear(nn.Module): def __init__(self, in_features, out_features, std_init=0.4): super(NoisyLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.std_init = std_init self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features)) self.weight_sigma = nn.Parameter(torch.Tensor(out_features, in_features)) self.register_buffer('weight_epsilon', torch.Tensor(out_features, in_features)) self.bias_mu = nn.Parameter(torch.Tensor(out_features)) self.bias_sigma = nn.Parameter(torch.Tensor(out_features)) self.register_buffer('bias_epsilon', torch.Tensor(out_features)) self.reset_parameters() self.reset_noise() def reset_parameters(self): mu_range = 1 / math.sqrt(self.weight_mu.size(1)) self.weight_mu.data.uniform_(-mu_range, mu_range) self.weight_sigma.data.fill_(self.std_init / math.sqrt(self. weight_sigma.size(1))) self.bias_mu.data.uniform_(-mu_range, mu_range) self.bias_sigma.data.fill_(self.std_init / math.sqrt(self. bias_sigma.size(0))) def _scale_noise(self, size): x = torch.randn(size) x = x.sign().mul(x.abs().sqrt()) return x def reset_noise(self): epsilon_in = self._scale_noise(self.in_features) epsilon_out = self._scale_noise(self.out_features) self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in)) self.bias_epsilon.copy_(self._scale_noise(self.out_features)) def forward(self, input): if self.training: return F.linear(input, self.weight_mu + self.weight_sigma.mul( Variable(self.weight_epsilon)), self.bias_mu + self. bias_sigma.mul(Variable(self.bias_epsilon))) else: return F.linear(input, self.weight_mu, self.bias_mu) class DQN_Simple(nn.Module): def __init__(self, input_dim, num_actions, dueling=False, noise_linear= False): super(DQN_Simple, self).__init__() self.dueling = dueling linear = nn.Linear if noise_linear: linear = NoisyLinear self.fc1 = linear(input_dim, 128) self.fc2 = linear(128, 64) if self.dueling: self.fc_v = linear(64, 1) self.fc_As = linear(64, num_actions) else: self.fc_head = linear(64, num_actions) self.relu = nn.ReLU() self.lrelu = nn.LeakyReLU(0.01) def forward(self, x): x = self.lrelu(self.fc1(x)) x = self.lrelu(self.fc2(x)) if self.dueling: v = self.fc_v(x) As = self.fc_As(x) q = v.expand_as(As) + (As - As.mean(1, keepdim=True).expand_as(As)) else: q = self.fc_head(x) return q def reset_noise(self): for name, module in self.named_children(): if 'fc' in name: module.reset_noise() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch.autograd import Variable import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 128), (128, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(8192)](buf0, primals_2, buf1, buf2, 8192, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 buf3 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 64), (1, 128), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch. 
float32) triton_poi_fused_leaky_relu_1[grid(4096)](buf3, primals_5, buf4, buf5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_7 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 128), (128, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 64), (64, 1), 0 ), primals_6, primals_4 class NoisyLinear(nn.Module): def __init__(self, in_features, out_features, std_init=0.4): super(NoisyLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.std_init = std_init self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features)) self.weight_sigma = nn.Parameter(torch.Tensor(out_features, in_features)) self.register_buffer('weight_epsilon', torch.Tensor(out_features, in_features)) self.bias_mu = nn.Parameter(torch.Tensor(out_features)) self.bias_sigma = nn.Parameter(torch.Tensor(out_features)) self.register_buffer('bias_epsilon', torch.Tensor(out_features)) self.reset_parameters() self.reset_noise() def reset_parameters(self): mu_range = 1 / math.sqrt(self.weight_mu.size(1)) self.weight_mu.data.uniform_(-mu_range, mu_range) self.weight_sigma.data.fill_(self.std_init / math.sqrt(self. weight_sigma.size(1))) self.bias_mu.data.uniform_(-mu_range, mu_range) self.bias_sigma.data.fill_(self.std_init / math.sqrt(self. bias_sigma.size(0))) def _scale_noise(self, size): x = torch.randn(size) x = x.sign().mul(x.abs().sqrt()) return x def reset_noise(self): epsilon_in = self._scale_noise(self.in_features) epsilon_out = self._scale_noise(self.out_features) self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in)) self.bias_epsilon.copy_(self._scale_noise(self.out_features)) def forward(self, input): if self.training: return F.linear(input, self.weight_mu + self.weight_sigma.mul( Variable(self.weight_epsilon)), self.bias_mu + self. bias_sigma.mul(Variable(self.bias_epsilon))) else: return F.linear(input, self.weight_mu, self.bias_mu) class DQN_SimpleNew(nn.Module): def __init__(self, input_dim, num_actions, dueling=False, noise_linear= False): super(DQN_SimpleNew, self).__init__() self.dueling = dueling linear = nn.Linear if noise_linear: linear = NoisyLinear self.fc1 = linear(input_dim, 128) self.fc2 = linear(128, 64) if self.dueling: self.fc_v = linear(64, 1) self.fc_As = linear(64, num_actions) else: self.fc_head = linear(64, num_actions) self.relu = nn.ReLU() self.lrelu = nn.LeakyReLU(0.01) def reset_noise(self): for name, module in self.named_children(): if 'fc' in name: module.reset_noise() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc_head.weight primals_7 = self.fc_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
exe1023/GA-final
DQN_Simple
false
10174
[ "MIT" ]
0
dad84cda665ef24e9568a79a2e7ff0a00edf5851
https://github.com/exe1023/GA-final/tree/dad84cda665ef24e9568a79a2e7ff0a00edf5851
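Both pointwise kernels in this record fuse the bias add with LeakyReLU(0.01) and write two outputs: the boolean pre > 0 mask (kept for the backward pass) and the activated values. A minimal eager equivalent of one such kernel, on made-up tensor values:

import torch
import torch.nn as nn

x = torch.randn(64, 128)
bias = torch.randn(128)

pre = x + bias                            # the fused bias add (tmp0 + tmp1)
mask = pre > 0                            # stored to out_ptr0 for the backward
act = torch.where(mask, pre, 0.01 * pre)  # stored to out_ptr1

assert torch.allclose(act, nn.LeakyReLU(0.01)(pre))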
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/lh/clhtaboxxs526aw4bqcb7s6xoig5vzwco55tfg6waaga3ao3elgd.py # Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm] # Source node to ATen node mapping: # euclidean_distance => add, pow_1, pow_2, sub, sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + (x0), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kp/ckp53fcort4i65ou77phinf74g3p6q5uw355vo33bget5pqh6vdi.py # Topologically Sorted Source Nodes: [pow_1, mul, sub, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.clamp, aten.add, aten.mean] # Source node to ATen node mapping: # add => add_1 # clamp => clamp_min # loss_contrastive => mean # mul => mul # mul_1 => mul_1 # pow_1 => pow_3 # pow_2 => pow_4 # sub => sub_1 # sub_1 => sub_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {}) triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 
'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r2), None) tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp3 = tmp0 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = tmp4 - tmp1 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp5 * tmp9 tmp11 = tmp3 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm] stream0 = get_raw_stream(0) triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [pow_1, mul, sub, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.clamp, aten.add, aten.mean] triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import 
compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F class ContrastiveLoss(torch.nn.Module): """Contrastive loss function""" def __init__(self, margin=1.0): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) loss_contrastive = torch.mean(label * torch.pow(euclidean_distance, 2) + (1 - label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp3 = tmp0 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = tmp4 - tmp1 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp5 * tmp9 tmp11 = tmp3 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(torch.nn.Module): """Contrastive loss function""" def __init__(self, margin=1.0): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return 
output[0]
hz512/Smart-Parking-Enforcement-System
ContrastiveLoss
false
10,175
[ "MIT" ]
0
e990903de545693ad6e2536bf167c69ab672d16a
https://github.com/hz512/Smart-Parking-Enforcement-System/tree/e990903de545693ad6e2536bf167c69ab672d16a
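A minimal parity sketch for the record above (assuming both ContrastiveLoss and ContrastiveLossNew are in scope and a CUDA device is available; this check is not part of the original repo). The two fused kernels implement L = y * d**2 + (1 - y) * max(margin - d, 0)**2 with d = ||x1 - x2 + 1e-6||_2 taken over the last dimension; note the compiled path specializes margin to the traced value 1.0 inside the reduction kernel.

import torch

x1 = torch.rand(4, 4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
eager = ContrastiveLoss(margin=1.0)(x1, x2, y)
compiled = ContrastiveLossNew(margin=1.0)(x1, x2, y)
# Both paths reduce to a single scalar; they should agree to float32 tolerance.
assert torch.allclose(eager, compiled, atol=1e-5)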
REINFORCE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nz/cnzjdcltdo77fzlqbwjg2k6edxl5zjqohubgybwuenc5weaj3cjw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu] # Source node to ATen node mapping: # x => expm1, gt, mul, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax 
=> div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (256, 4), (4, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, 
buf1, reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) del buf3 return (buf4, primals_1, buf0, buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class REINFORCE(nn.Module): def __init__(self, input_size, num_actions): super(REINFORCE, self).__init__() self.fc = nn.Linear(input_size, 256) self.head = nn.Linear(256, num_actions) self.relu = nn.ReLU() self.elu = nn.ELU() def forward(self, x): x = self.elu(self.fc(x.view(x.size(0), -1))) x = self.head(x) return F.softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (256, 4), (4, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf3 return buf4, primals_1, buf0, buf1, buf4, primals_4 class REINFORCENew(nn.Module): def __init__(self, input_size, num_actions): super(REINFORCENew, self).__init__() self.fc = nn.Linear(input_size, 256) self.head = nn.Linear(256, num_actions) self.relu = nn.ReLU() self.elu = nn.ELU() def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_4 = self.head.weight primals_5 = self.head.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
exe1023/GA-final
REINFORCE
false
10,176
[ "MIT" ]
0
dad84cda665ef24e9568a79a2e7ff0a00edf5851
https://github.com/exe1023/GA-final/tree/dad84cda665ef24e9568a79a2e7ff0a00edf5851
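A minimal usage sketch for the policy network above (assuming REINFORCENew is in scope and CUDA is available; the action sampling and log-prob bookkeeping below are standard REINFORCE practice, not part of the original record). The compiled softmax kernels normalize over dim 1, so each output row is a distribution over the 4 actions.

import torch
from torch.distributions import Categorical

policy = REINFORCENew(input_size=4, num_actions=4).cuda()
obs = torch.rand(4, 4, device='cuda')  # batch of flattened observations
probs = policy(obs)                    # rows are softmax distributions over 4 actions
dist = Categorical(probs=probs)
action = dist.sample()                 # one sampled action per batch element
log_prob = dist.log_prob(action)       # enters the loss as -log_prob * return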
LanguageModelCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/y6/cy6c63inntzv3gk7tttvtntl47lyazeloek7emnkrhkhkqbx5kci.py # Topologically Sorted Source Nodes: [neg, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div] # Source node to ATen node mapping: # neg => neg # output => mul # output_1 => div # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg2_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg2_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr2 + (r0), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 16, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import * class LanguageModelCriterion(nn.Module): def __init__(self): super(LanguageModelCriterion, self).__init__() def forward(self, input, target, mask): if target.ndim == 3: target = target.reshape(-1, target.shape[2]) mask = mask.reshape(-1, mask.shape[2]) target = target[:, :input.size(1)] mask = mask[:, :input.size(1)] output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class LanguageModelCriterionNew(nn.Module): def __init__(self): super(LanguageModelCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg1_1 = input_0 arg0_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
helloMickey/self-critical.pytorch
LanguageModelCriterion
false
10,177
[ "MIT" ]
0
3a26111012099e13daeb688136fea45186127935
https://github.com/helloMickey/self-critical.pytorch/tree/3a26111012099e13daeb688136fea45186127935
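The fused kernel above is a masked negative log-likelihood: it gathers the log-probability of each target token, negates it, zeroes out padded positions, and normalizes by the number of real tokens, i.e. loss = sum_t(-log p(y_t) * m_t) / sum_t m_t. A small eager sketch with hypothetical shapes, assuming the input already holds log-probabilities:

import torch

logp = torch.randn(2, 3, 5).log_softmax(-1)    # (batch, time, vocab) log-probs
tgt = torch.randint(0, 5, (2, 3))              # target token ids
mask = torch.tensor([[1., 1., 0.], [1., 0., 0.]])
nll = -logp.gather(2, tgt.unsqueeze(2)).squeeze(2)
loss = (nll * mask).sum() / mask.sum()         # average over unmasked tokens only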
RewardCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/th/cthi7dvxte3xeavjjqrz6omclqig3gqujobvt5ooicycppv45vw4.py # Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div] # Source node to ATen node mapping: # mul => mul # neg => neg # output => mul_1 # output_1 => div # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr2 + (r0), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = r0 % 4 tmp12 = tl.full([1, 1], 0, tl.int64) tmp13 = tmp11 >= tmp12 tmp14 = tl.full([1, 1], 1, tl.int64) tmp15 = tmp11 < tmp14 tmp16 = 1.0 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp15, tmp16, tmp17) tmp19 = tmp11 >= tmp14 tmp20 = tl.full([1, 1], 4, tl.int64) tmp21 = tmp11 < tmp20 tmp22 = tl.load(in_ptr0 + (tl.broadcast_to((4*(r0 // 4)) + ((-1) + (r0 % 4)), [XBLOCK, RBLOCK])), tmp19, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 > tmp12 tmp24 = tmp23.to(tl.float32) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp19, tmp24, tmp25) tmp27 = tl.where(tmp15, tmp18, tmp26) tmp28 = tmp10 * tmp27 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.sum(tmp29, 1)[:, None] tmp32 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp34 = tl.sum(tmp32, 1)[:, None] tmp35 = tmp31 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 16, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import * class RewardCriterion(nn.Module): def __init__(self): super(RewardCriterion, self).__init__() def forward(self, input, seq, reward): input = input.gather(2, seq.unsqueeze(2)).squeeze(2) input = input.reshape(-1) reward = reward.reshape(-1) mask = (seq > 0).float() mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1 ).reshape(-1) output = -input * reward * mask output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = r0 % 4 tmp12 = tl.full([1, 1], 0, tl.int64) tmp14 = tl.full([1, 1], 1, tl.int64) tmp15 = tmp11 < tmp14 tmp16 = 1.0 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp15, tmp16, tmp17) tmp19 = tmp11 >= tmp14 tl.full([1, 1], 4, tl.int64) tmp22 = tl.load(in_ptr0 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4), [XBLOCK, RBLOCK]), tmp19, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 > tmp12 tmp24 = tmp23.to(tl.float32) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp19, tmp24, tmp25) tmp27 = tl.where(tmp15, tmp18, tmp26) tmp28 = tmp10 * tmp27 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.sum(tmp29, 1)[:, None] tmp32 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp34 = tl.sum(tmp32, 1)[:, None] tmp35 = tmp31 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class RewardCriterionNew(nn.Module): def __init__(self): super(RewardCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
helloMickey/self-critical.pytorch
RewardCriterion
false
10,178
[ "MIT" ]
0
3a26111012099e13daeb688136fea45186127935
https://github.com/helloMickey/self-critical.pytorch/tree/3a26111012099e13daeb688136fea45186127935
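The mask construction in the record above keeps the first generated step unconditionally and then every step whose previous token is nonzero, so the step that emits the first pad/EOS token still contributes to the loss. A small worked sketch (new_ones is used here as the modern equivalent of the record's legacy mask.new(...).fill_(1)):

import torch

seq = torch.tensor([[3, 5, 0, 0]])  # generated tokens, 0 = pad
mask = (seq > 0).float()            # [[1., 1., 0., 0.]]
mask = torch.cat([mask.new_ones(mask.size(0), 1), mask[:, :-1]], 1)
# mask is now [[1., 1., 1., 0.]]: the step producing the first 0 still counts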
Upsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ri/cridcblv5y4byffde5hada6qlaegue7d23k6s35nxcc5uqjenrr6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index, _unsafe_index_1, add, add_2, clamp_max_1, clamp_min, clamp_min_1, convert_element_type, convert_element_type_1, iota, mul, mul_1, sub, sub_1, sub_2 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {}) # %mul_1 : [num_users=1] 
= call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_1), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.load(in_ptr0 + (tmp9 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp9 + tmp11 tmp13 = tl.full([1], 3, tl.int64) tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = tl.load(in_ptr0 + (tmp14 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tmp15 - tmp10 tmp17 = tmp9.to(tl.float32) tmp18 = tmp8 - tmp17 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = triton_helpers.minimum(tmp19, tmp4) tmp21 = tmp16 * tmp20 tmp22 = tmp10 + tmp21 tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._utils import torch.utils.data import torch.utils.data.distributed import torch.nn.functional as F import torch.nn.parallel import torch.optim class Upsample(nn.Module): """ nn.Upsample is deprecated """ def __init__(self, scale_factor, mode='linear'): super(Upsample, self).__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, x): x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'scale_factor': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch._utils import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp9 + tmp11 tmp13 = tl.full([1], 3, tl.int64) tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = tl.load(in_ptr0 + (tmp14 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp16 = tmp15 - tmp10 tmp17 = tmp9.to(tl.float32) tmp18 = tmp8 - tmp17 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = triton_helpers.minimum(tmp19, tmp4) tmp21 = tmp16 * tmp20 tmp22 = tmp10 + tmp21 tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class UpsampleNew(nn.Module): """ nn.Upsample is deprecated """ def __init__(self, scale_factor, mode='linear'): super(UpsampleNew, self).__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
fsImageries/video-to-pose3D
Upsample
false
10,179
[ "MIT" ]
0
098c87ce19dc3331da03e6eac0b9744684eb66f6
https://github.com/fsImageries/video-to-pose3D/tree/098c87ce19dc3331da03e6eac0b9744684eb66f6
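The kernel above performs 1-D linear interpolation with the align_corners=False source mapping src = (dst + 0.5) * (in_size / out_size) - 0.5, clamped below at 0. With scale_factor=1.0 that mapping is the identity, so the traced case is effectively a copy; a quick eager check of that claim (a sketch, assuming plain F.interpolate matches the compiled output):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)
y = F.interpolate(x, scale_factor=1.0, mode='linear')
assert torch.allclose(x, y)  # identity resample at scale factor 1.0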
EPELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/pu/cpubwn56es2v3ahi7i55khtjwt2lmu5hlmw2jxml4kozxrimfwek.py # Topologically Sorted Source Nodes: [sub, add, norm, lossvalue], Original ATen: [aten.sub, aten.add, aten.linalg_vector_norm, aten.mean] # Source node to ATen node mapping: # add => add # lossvalue => mean # norm => pow_1, pow_2, sum_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-16), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) triton_per_fused_add_linalg_vector_norm_mean_sub_0 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp6 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp7 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp18 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp19 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 - tmp1 tmp3 = 1e-16 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp29, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, add, norm, lossvalue], Original ATen: [aten.sub, aten.add, aten.linalg_vector_norm, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class EPELoss(nn.Module): def __init__(self): super(EPELoss, self).__init__() def forward(self, output, target): lossvalue = torch.norm(output - target + 1e-16, p=2, dim=1).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
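A short worked example may help pin down the reduction order here; this is a sketch with hypothetical constant inputs, not part of the original harness, and it assumes the EPELoss definition above is in scope:

import torch
# Sketch: with output == 0 and target == 1, each pixel's endpoint error is
# sqrt(sum over the channel dim of (-1 + 1e-16)**2) = sqrt(C); the mean over
# the remaining dims leaves that value unchanged.
loss_fn = EPELoss()
out = torch.zeros(2, 2, 3, 3)   # (N, C, H, W) with C = 2
tgt = torch.ones(2, 2, 3, 3)
print(loss_fn(out, tgt))        # tensor(1.4142...) ~= sqrt(2)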
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp6 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp18 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = 1e-16 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class EPELossNew(nn.Module): def __init__(self): super(EPELossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
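One plausible way to validate the fused kernel against the eager module is the sketch below; it assumes a CUDA device, that both class definitions above are in scope, and that the inputs are exactly (4, 4, 4, 4), since call() asserts those sizes and strides:

import torch
# Sketch: the compiled wrapper runs one fused sub/add/norm/mean reduction.
if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    ref = EPELoss()(a, b)        # eager reference
    fused = EPELossNew()(a, b)   # single fused Triton reduction
    assert torch.allclose(ref, fused, atol=1e-6)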
haochen23/GeoProj
EPELoss
false
10180
[ "MIT" ]
0
4b31f51789f9cc41ea7dc977cee057b8bc8a83cc
https://github.com/haochen23/GeoProj/tree/4b31f51789f9cc41ea7dc977cee057b8bc8a83cc
TripletLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/py/cpyk5b6mlotalxultdmmebirncntxpef6b6uohknjluaz76jqen3.py # Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean] # Source node to ATen node mapping: # add => add # distance_negative => sum_2 # distance_positive => sum_1 # losses => relu # mean => mean # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 4), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {}) triton_per_fused_add_mean_pow_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_mean_pow_relu_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp19 = tl.load(in_ptr2 + (r0 + (64*r1)), None) tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None) tmp26 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None) tmp30 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp20 = tmp0 - tmp19 tmp21 = tmp20 * tmp20 tmp23 = tmp4 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp21 + tmp24 tmp27 = tmp9 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp25 + tmp28 tmp31 = tmp14 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp33 tmp35 = 4.0 tmp36 = tmp34 + tmp35 tmp37 = tl.full([1, 1], 0, tl.int32) tmp38 = triton_helpers.maximum(tmp37, tmp36) tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean] 
stream0 = get_raw_stream(0) triton_per_fused_add_mean_pow_relu_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._utils import torch.utils.data import torch.utils.data.distributed import torch.nn.functional as F import torch.nn.parallel import torch.optim class TripletLoss(nn.Module): """ Triplet loss Takes embeddings of an anchor sample, a positive sample and a negative sample """ def __init__(self, margin): super(TripletLoss, self).__init__() self.margin = margin def forward(self, anchor, positive, negative, size_average=True): distance_positive = (anchor - positive).pow(2).sum(1) distance_negative = (anchor - negative).pow(2).sum(1) losses = F.relu(distance_positive - distance_negative + self.margin) return losses.mean() if size_average else losses.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'margin': 4}]
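A small worked example of the hinge, with hypothetical constant embeddings (not from the original repo), assuming the TripletLoss definition above is in scope:

import torch
# d_pos = ||0 - 2||^2 = 4 * 2^2 = 16, d_neg = ||0 - 1||^2 = 4 * 1^2 = 4,
# so loss = relu(16 - 4 + margin) = 16 when margin = 4.
loss_fn = TripletLoss(margin=4)
anchor = torch.zeros(1, 4)
positive = torch.full((1, 4), 2.0)  # far from the anchor
negative = torch.ones(1, 4)         # closer to the anchor
print(loss_fn(anchor, positive, negative))  # tensor(16.)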
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch._utils import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp20 = tmp0 - tmp19 tmp21 = tmp20 * tmp20 tmp23 = tmp4 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp21 + tmp24 tmp27 = tmp9 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp25 + tmp28 tmp31 = tmp14 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp33 tmp35 = 4.0 tmp36 = tmp34 + tmp35 tmp37 = tl.full([1, 1], 0, tl.int32) tmp38 = triton_helpers.maximum(tmp37, tmp36) tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class TripletLossNew(nn.Module): """ Triplet loss Takes embeddings of an anchor sample, a positive sample and a negative sample """ def __init__(self, margin): super(TripletLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
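Note that the margin has been constant-folded into the kernel (tmp35 = 4.0), so this compiled artifact is only valid for margin=4 even though TripletLossNew still stores self.margin. A hedged agreement check under that assumption, on a CUDA device with both classes in scope:

import torch
# Sketch: eager and compiled modules should agree for margin=4, the value
# baked into the kernel as tmp35.
if torch.cuda.is_available():
    a, p, n = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    ref = TripletLoss(margin=4)(a, p, n)
    fused = TripletLossNew(margin=4)(a, p, n)
    assert torch.allclose(ref, fused, atol=1e-6)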
fsImageries/video-to-pose3D
TripletLoss
false
10181
[ "MIT" ]
0
098c87ce19dc3331da03e6eac0b9744684eb66f6
https://github.com/fsImageries/video-to-pose3D/tree/098c87ce19dc3331da03e6eac0b9744684eb66f6
JointsMSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ik/cik3daggkczn6yw5fhxreelmv4f7337qr27ccf6o2xrjfrel5wsp.py # Topologically Sorted Source Nodes: [mul, mul_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, mul_9, mul_10, mse_loss_3, mul_11, loss_3, truediv], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div] # Source node to ATen node mapping: # loss => add # loss_1 => add_1 # loss_2 => add_2 # loss_3 => add_3 # mse_loss => mean, pow_1, sub # mse_loss_1 => mean_1, pow_2, sub_1 # mse_loss_2 => mean_2, pow_3, sub_2 # mse_loss_3 => mean_3, pow_4, sub_3 # mul => mul # mul_1 => mul_1 # mul_10 => mul_10 # mul_11 => mul_11 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_8 => mul_8 # mul_9 => mul_9 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %select), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %select_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, %select_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, %select_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) # %mul_5 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, %select_4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_5, %select_5), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_8), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_6, %select_6), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_7, %select_7), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_9, %mul_10), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_4,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_3, 0.5), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_11), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4), kwargs = {}) triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = tmp10 * tmp11 tmp14 = tmp13 * tmp11 tmp15 = tmp12 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp22 = tmp20 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp22 - tmp24 tmp26 = tmp25 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp32 = tmp30 * tmp31 tmp34 = tmp33 * tmp31 tmp35 = tmp32 - tmp34 tmp36 = tmp35 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 4.0 tmp41 = tmp9 / tmp40 tmp42 = 0.5 tmp43 = tmp41 * tmp42 tmp44 = 0.0 tmp45 = tmp43 + tmp44 tmp46 = tmp19 / tmp40 tmp47 = tmp46 * tmp42 tmp48 = tmp45 + tmp47 tmp49 = tmp29 / tmp40 tmp50 = tmp49 * tmp42 tmp51 = tmp48 + tmp50 tmp52 = tmp39 / tmp40 tmp53 = tmp52 * tmp42 tmp54 = tmp51 + tmp53 tmp55 = 0.25 tmp56 = tmp54 * tmp55 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp56, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, mul_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, mul_9, mul_10, mse_loss_3, mul_11, loss_3, truediv], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0.run(buf4, arg0_1, arg2_1, arg1_1, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) 
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._utils import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel import torch.optim class JointsMSELoss(nn.Module): def __init__(self, use_target_weight): super(JointsMSELoss, self).__init__() self.criterion = nn.MSELoss() self.use_target_weight = use_target_weight def forward(self, output, target, target_weight): batch_size = output.size(0) num_joints = output.size(1) heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1 ) heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1) loss = 0 for idx in range(num_joints): heatmap_pred = heatmaps_pred[idx].squeeze() heatmap_gt = heatmaps_gt[idx].squeeze() if self.use_target_weight: loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight [:, idx]), heatmap_gt.mul(target_weight[:, idx])) else: loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) return loss / num_joints def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'use_target_weight': 4}]
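With the bundled 4x4 inputs each "heatmap" degenerates to one value per sample; a shape sketch (hypothetical call, assuming the JointsMSELoss definition above is in scope):

import torch
# output.reshape(4, 4, -1) -> (4, 4, 1); .split(1, 1) yields 4 chunks of
# shape (4, 1, 1); .squeeze() -> (4,), i.e. one scalar "heatmap" per sample.
crit = JointsMSELoss(use_target_weight=True)  # any truthy value enables weighting
o, t, w = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
print(crit(o, t, w))  # scalar: sum of four 0.5 * MSE terms, divided by 4 joints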
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._utils import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = tmp10 * tmp11 tmp14 = tmp13 * tmp11 tmp15 = tmp12 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp22 = tmp20 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp22 - tmp24 tmp26 = tmp25 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp32 = tmp30 * tmp31 tmp34 = tmp33 * tmp31 tmp35 = tmp32 - tmp34 tmp36 = tmp35 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 4.0 tmp41 = tmp9 / tmp40 tmp42 = 0.5 tmp43 = tmp41 * tmp42 tmp44 = 0.0 tmp45 = tmp43 + tmp44 tmp46 = tmp19 / tmp40 tmp47 = tmp46 * tmp42 tmp48 = tmp45 + tmp47 tmp49 = tmp29 / tmp40 tmp50 = tmp49 * tmp42 tmp51 = tmp48 + tmp50 tmp52 = tmp39 / tmp40 tmp53 = tmp52 * tmp42 tmp54 = tmp51 + tmp53 tmp55 = 0.25 tmp56 = tmp54 * tmp55 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf4, arg0_1, arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf4, class JointsMSELossNew(nn.Module): def __init__(self, use_target_weight): super(JointsMSELossNew, self).__init__() self.criterion = nn.MSELoss() self.use_target_weight = use_target_weight def forward(self, input_0, input_1, input_2): arg0_1 = 
input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
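The kernel hard-codes the weighted branch (the weight in in_ptr1 multiplies both prediction and target, tmp2 and tmp4), so the compiled module matches the eager one only when use_target_weight is truthy. A sketch of that check, assuming a CUDA device and both classes in scope:

import torch
if torch.cuda.is_available():
    o, t, w = (torch.rand(4, 4, device='cuda') for _ in range(3))
    ref = JointsMSELoss(use_target_weight=True)(o, t, w)
    fused = JointsMSELossNew(use_target_weight=True)(o, t, w)
    assert torch.allclose(ref, fused, atol=1e-6)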
fsImageries/video-to-pose3D
JointsMSELoss
false
10182
[ "MIT" ]
0
098c87ce19dc3331da03e6eac0b9744684eb66f6
https://github.com/fsImageries/video-to-pose3D/tree/098c87ce19dc3331da03e6eac0b9744684eb66f6
SpeakerIntegrator
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6k/c6kjswdpfp3aalfve5s3xywyhlsv6ovdpjf55crab75e2n6rtvrp.py # Topologically Sorted Source Nodes: [spembs_1, x], Original ATen: [aten.repeat, aten.add] # Source node to ATen node mapping: # spembs_1 => repeat # x => add # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4, 1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %repeat), kwargs = {}) triton_poi_fused_add_repeat_0 = async_compile.triton('triton_poi_fused_add_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = (xindex // 
16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [spembs_1, x], Original ATen: [aten.repeat, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_repeat_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class SpeakerIntegrator(nn.Module): def __init__(self): super(SpeakerIntegrator, self).__init__() def forward(self, x, spembs): """ x shape : (batch, 39, 256) spembs shape : (batch, 256) """ spembs = spembs.unsqueeze(1) spembs = spembs.repeat(1, x.shape[1], 1) x = x + spembs return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
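Despite the (batch, 39, 256) shapes in the docstring, the bundled inputs are 2-D, in which case the add broadcasts x over the leading dimension of the repeated embeddings; a quick shape sketch (assuming the SpeakerIntegrator definition above is in scope):

import torch
m = SpeakerIntegrator()
x = torch.rand(4, 4)       # broadcast against the (4, 4, 4) tensor below
spembs = torch.rand(4, 4)  # unsqueeze(1) then repeat(1, 4, 1) -> (4, 4, 4)
print(m(x, spembs).shape)  # torch.Size([4, 4, 4])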
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_repeat_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class SpeakerIntegratorNew(nn.Module): def __init__(self): super(SpeakerIntegratorNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hwRG/FastSpeech2-Pytorch-old-man_city
SpeakerIntegrator
false
10183
[ "MIT" ]
0
c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573
https://github.com/hwRG/FastSpeech2-Pytorch-old-man_city/tree/c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573
SceneParserHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/sg/csgdg3aydvavjgap6qhul7z76ytwauxsmjtmjmpyykxu7qjn2g4l.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.unsqueeze] # Source node to ATen node mapping: # x => unsqueeze # Graph fragment: # %unsqueeze : [num_users=2] = call_function[target=torch.ops.aten.unsqueeze.default](args = (%primals_3, 0), kwargs = {}) triton_poi_fused_unsqueeze_0 = async_compile.triton('triton_poi_fused_unsqueeze_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_unsqueeze_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_unsqueeze_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = 
tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ib/cibhtju55xp4knpcrfnffs5cery4dvtpmr4b6n5a4xieye3owbpr.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mean] # Source node to ATen node mapping: # x_1 => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%squeeze, [-1, -2], True), kwargs = {}) triton_per_fused_mean_1 = async_compile.triton('triton_per_fused_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 2048 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (2048*r1)), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (2048, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.unsqueeze] stream0 = get_raw_stream(0) triton_poi_fused_unsqueeze_0.run(primals_3, buf0, 4, 16, grid=grid(4, 16), stream=stream0) del primals_3 # Topologically Sorted 
Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 2048, 4, 4), (32768, 1, 8192, 2048)) buf2 = empty_strided_cuda((2048, 1, 1), (1, 2048, 2048), torch.float32) buf3 = reinterpret_tensor(buf2, (2048, 1, 1), (1, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mean] triton_per_fused_mean_1.run(buf3, buf1, primals_2, 2048, 16, grid=grid(2048), stream=stream0) del buf1 del primals_2 buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (1, 2048), (0, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_5 return (reinterpret_tensor(buf4, (4, ), (1, ), 0), primals_1, buf0, reinterpret_tensor(buf3, (1, 2048), (2048, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2048, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn class SceneParserHead(nn.Module): def __init__(self, in_channels, num_classes): super(SceneParserHead, self).__init__() self.conv1x1 = nn.Conv2d(in_channels, 2048, 1, 1) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(2048, num_classes) def forward(self, x): x = self.conv1x1(x) x = self.avgpool(x) x = torch.flatten(x) x = self.fc(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'num_classes': 4}]
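Note that torch.flatten(x) is called without start_dim=1, so any batch dimension is flattened away as well; with the bundled unbatched 3-D input this still type-checks, as the sketch below shows (hypothetical call; it assumes a PyTorch version whose Conv2d accepts unbatched (C, H, W) input):

import torch
head = SceneParserHead(in_channels=4, num_classes=4)
x = torch.rand(4, 4, 4)  # treated by Conv2d as one (C, H, W) image
print(head(x).shape)     # torch.Size([4]): fc sees a single 2048-vector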
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_unsqueeze_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r1), None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp7, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (2048,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_unsqueeze_0[grid(4, 16)](primals_3, buf0, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 2048, 4, 4), (32768, 1, 8192, 2048)) buf2 = empty_strided_cuda((2048, 1, 1), (1, 2048, 2048), torch.float32) buf3 = reinterpret_tensor(buf2, (2048, 1, 1), (1, 1, 1), 0) del buf2 triton_per_fused_mean_1[grid(2048)](buf3, buf1, primals_2, 2048, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf1 del primals_2 buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (1, 2048), (0, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_5 return reinterpret_tensor(buf4, (4,), (1,), 0 ), primals_1, buf0, reinterpret_tensor(buf3, (1, 2048), (2048, 1), 0 ), primals_4 class SceneParserHeadNew(nn.Module): def __init__(self, in_channels, num_classes): super(SceneParserHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(in_channels, 2048, 1, 1) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(2048, num_classes) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_4 = self.fc.weight primals_5 = 
self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
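Because this head carries parameters, an agreement check needs shared weights; a hedged sketch, assuming a CUDA device and that the submodule names match between the two classes (they do above):

import torch
if torch.cuda.is_available():
    eager = SceneParserHead(4, 4).cuda()
    fused = SceneParserHeadNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # copy conv1x1/fc parameters
    x = torch.rand(4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)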
hangwudy/pytorch_tutorial
SceneParserHead
false
10184
[ "MIT" ]
0
857b128253bd1e2bd30cb85e995c757e5acbb3a2
https://github.com/hangwudy/pytorch_tutorial/tree/857b128253bd1e2bd30cb85e995c757e5acbb3a2
ConvTemporalGraphical
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/kn/cknjlh5mzsg6tap75kwweiwuidyxeolmhbgzpsrthbqvrieuksvv.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x4 = (xindex // 256) x5 = (xindex // 16) % 16 x3 = (xindex // 64) % 4 x6 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x5) + (64*x1) + (256*x4)), xmask) 
tmp1 = tl.load(in_ptr1 + (x3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x6), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_4, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 1024, grid=grid(1024), stream=stream0) del buf0 del primals_3 buf2 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (1, 64, 16), (0, 16, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0), out=buf2) del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch._utils
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim


class ConvTemporalGraphical(nn.Module):
    """The basic module for applying a graph convolution.

    Args:
        in_channels (int): Number of channels in the input sequence data
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int): Size of the graph convolving kernel
        t_kernel_size (int): Size of the temporal convolving kernel
        t_stride (int, optional): Stride of the temporal convolution. Default: 1
        t_padding (int, optional): Temporal zero-padding added to both sides
            of the input. Default: 0
        t_dilation (int, optional): Spacing between temporal kernel elements.
            Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    Shape:
        - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
        - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
        - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format

        where
            :math:`N` is a batch size,
            :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
            :math:`T_{in}/T_{out}` is a length of input/output sequence,
            :math:`V` is the number of graph nodes.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
            t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):
        super().__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv2d(in_channels, out_channels * kernel_size,
            kernel_size=(t_kernel_size, 1), padding=(t_padding, 0),
            stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias)

    def forward(self, x, A):
        assert A.size(0) == self.kernel_size
        x = self.conv(x)
        n, kc, t, v = x.size()
        x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v)
        x = torch.einsum('nkctv,kvw->nctw', (x, A))
        return x.contiguous(), A


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
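# Usage sketch (not part of the original source; shapes taken from
# get_inputs()/get_init_inputs() above). The Conv2d lifts the channel dim to
# out_channels * kernel_size; the einsum then contracts each of the K channel
# groups against its (V, V) adjacency slice.
if __name__ == '__main__':
    gcn = ConvTemporalGraphical(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)  # (N, C, T, V)
    A = torch.rand(4, 4, 4)     # (K, V, V) with K == kernel_size
    out, A_out = gcn(x, A)
    assert out.shape == (4, 4, 4, 4)  # (N, out_channels, T, V)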
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._utils
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    # Fused bias add + permute: reads the (N, K*C, T, V) conv output and the
    # bias, writes an (N, C, T, K, V) contiguous buffer so the einsum can run
    # as a single bmm against A.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x4 = xindex // 256
    x5 = xindex // 16 % 16
    x3 = xindex // 64 % 4
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x5 + 64 * x1 + 256 * x4), xmask)
    tmp1 = tl.load(in_ptr1 + (x3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x6, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_4, primals_2, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1,
            1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(1024)](buf0, primals_3, buf1, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_3
        buf2 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (1, 64, 16), (0, 16, 1),
            0), reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0),
            out=buf2)
        del buf1
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 16),
        (64, 1, 4), 0)


class ConvTemporalGraphicalNew(nn.Module):
    """The basic module for applying a graph convolution.

    Args:
        in_channels (int): Number of channels in the input sequence data
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int): Size of the graph convolving kernel
        t_kernel_size (int): Size of the temporal convolving kernel
        t_stride (int, optional): Stride of the temporal convolution. Default: 1
        t_padding (int, optional): Temporal zero-padding added to both sides
            of the input. Default: 0
        t_dilation (int, optional): Spacing between temporal kernel elements.
            Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    Shape:
        - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format
        - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format
        - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format

        where
            :math:`N` is a batch size,
            :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
            :math:`T_{in}/T_{out}` is a length of input/output sequence,
            :math:`V` is the number of graph nodes.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
            t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):
        super().__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv2d(in_channels, out_channels * kernel_size,
            kernel_size=(t_kernel_size, 1), padding=(t_padding, 0),
            stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias)

    def forward(self, input_0, input_1):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_4 = input_0
        primals_1 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]
fsImageries/video-to-pose3D
ConvTemporalGraphical
false
10,185
[ "MIT" ]
0
098c87ce19dc3331da03e6eac0b9744684eb66f6
https://github.com/fsImageries/video-to-pose3D/tree/098c87ce19dc3331da03e6eac0b9744684eb66f6
Maxout
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/54/c54fiiozms64dqfszq2hf52cdztx43kas6yivnlda7p3bxzbtzle.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 
(4*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Maxout(nn.Module):

    def __init__(self, pool_size):
        super().__init__()
        self._pool_size = pool_size

    def forward(self, x):
        assert x.shape[-1] % self._pool_size == 0, (
            'Wrong input last dim size ({}) for Maxout({})'.format(
                x.shape[-1], self._pool_size))
        m, _i = x.view(*x.shape[:-1], x.shape[-1] // self._pool_size,
            self._pool_size).max(-1)
        return m


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'pool_size': 4}]
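# Usage sketch (not part of the original source; mirrors get_inputs()/
# get_init_inputs() above). Maxout(4) folds the last dimension into groups of
# 4 and keeps each group's maximum, so that dimension shrinks by the pool size.
if __name__ == '__main__':
    m = Maxout(pool_size=4)
    x = torch.rand(4, 4, 4, 4)
    y = m(x)
    assert y.shape == (4, 4, 4, 1)
    assert torch.equal(y, x.view(4, 4, 4, 1, 4).max(-1).values)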
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxoutNew(nn.Module): def __init__(self, pool_size): super().__init__() self._pool_size = pool_size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
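# Hypothetical parity check (not in the original file; assumes the eager
# Maxout reference above is importable and CUDA is available). The fused
# kernel is specialized to the traced (4, 4, 4, 4) input with pool_size=4,
# so it should agree exactly for that case.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = Maxout(4)(x)
    out = MaxoutNew(4)(x)
    torch.testing.assert_close(out, ref)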
hekaplex/FocusSeq2Seq
Maxout
false
10,186
[ "MIT" ]
0
9bab5d3aa020b4d587add9d7a070335cf0feb2d6
https://github.com/hekaplex/FocusSeq2Seq/tree/9bab5d3aa020b4d587add9d7a070335cf0feb2d6
RNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nc/cnc6a3vkphurm472zdavmn3qnff4lmaezxs63jlllw2kks2e62a4.py # Topologically Sorted Source Nodes: [input_combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_combined => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ih/cihkol22ojnmrk724q4odcm6ilz575wmbnulie74gzdcgue24tib.py # Topologically Sorted Source Nodes: [output_combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # output_combined => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%addmm, %addmm_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 12), (12, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: 
[input_combined], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4) # alias # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0) # alias # Topologically Sorted Source Nodes: [output_combined], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf3, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_9 return (buf5, buf1, buf0, buf4, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import Variable class RNN(nn.Module): def __init__(self, category_size, input_size, hidden_size, output_size): super(RNN, self).__init__() self.category_size = category_size self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.i2h = nn.Linear(category_size + input_size + hidden_size, hidden_size) self.i2o = nn.Linear(category_size + input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.softmax = nn.LogSoftmax() def forward(self, category, input, hidden): input_combined = torch.cat((category, input, hidden), 1) hidden = self.i2h(input_combined) output = self.i2o(input_combined) output_combined = torch.cat((hidden, output), 1) output = self.o2o(output_combined) return output, hidden def init_hidden(self): return Variable(torch.zeros(1, self.hidden_size)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'category_size': 4, 'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
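# Usage sketch (not part of the original source; batch-of-4 inputs from
# get_inputs() above). One step of the category-conditioned RNN cell:
# concatenate (category, input, hidden), compute the next hidden state and a
# preliminary output, then refine the output through o2o.
if __name__ == '__main__':
    rnn = RNN(category_size=4, input_size=4, hidden_size=4, output_size=4)
    category, inp, hidden = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
    output, hidden = rnn(category, inp, hidden)
    assert output.shape == (4, 4) and hidden.shape == (4, 4)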
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 12), (12, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4) extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0) triton_poi_fused_cat_1[grid(16)](buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_9 return buf5, buf1, buf0, buf4, primals_8 class RNNNew(nn.Module): def __init__(self, category_size, input_size, hidden_size, output_size): super(RNNNew, self).__init__() self.category_size = category_size 
self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.i2h = nn.Linear(category_size + input_size + hidden_size, hidden_size) self.i2o = nn.Linear(category_size + input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.softmax = nn.LogSoftmax() def init_hidden(self): return Variable(torch.zeros(1, self.hidden_size)) def forward(self, input_0, input_1, input_2): primals_4 = self.i2h.weight primals_5 = self.i2h.bias primals_6 = self.i2o.weight primals_7 = self.i2o.bias primals_8 = self.o2o.weight primals_9 = self.o2o.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
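# Hypothetical parity check (not in the original file; assumes the eager RNN
# reference above is importable and CUDA is available). With shared weights,
# the fused cat/addmm pipeline should reproduce one eager step; inputs are
# passed in the same (category, input, hidden) order.
if __name__ == '__main__':
    eager = RNN(4, 4, 4, 4).cuda()
    fused = RNNNew(4, 4, 4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    tensors = [torch.rand(4, 4, device='cuda') for _ in range(3)]
    out_ref, h_ref = eager(*tensors)
    out_new, h_new = fused(*tensors)
    torch.testing.assert_close(out_new, out_ref, rtol=1e-5, atol=1e-5)
    torch.testing.assert_close(h_new, h_ref, rtol=1e-5, atol=1e-5)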
iclementine/practical-pytorch
RNN
false
10,187
[ "MIT" ]
0
88e2e53e47328cdb3ec23573aec3ff0421f1a2b7
https://github.com/iclementine/practical-pytorch/tree/88e2e53e47328cdb3ec23573aec3ff0421f1a2b7
TVLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/sj/csj7tclbaqxg2m6c7gynspm34ueipcmkkv2zflt5nn7nyech4rsq.py # Topologically Sorted Source Nodes: [sub_1, pow_2, h_variance], Original ATen: [aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # h_variance => sum_2 # pow_2 => pow_2 # sub_1 => sub_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_11, %slice_15), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1, 2, 3]), kwargs = {}) triton_per_fused_pow_sub_sum_0 = async_compile.triton('triton_per_fused_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, 
XBLOCK : tl.constexpr): xnumel = 4 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex % 12 r2 = (rindex // 12) x0 = xindex tmp0 = tl.load(in_ptr0 + (4 + r1 + (16*r2) + (64*x0)), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1 + (16*r2) + (64*x0)), rmask & xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zs/czsr4vcu7blr6iilxu7urwnv3ic5mrjmrymoznunuzt2yh6m5pmh.py # Topologically Sorted Source Nodes: [sub, pow_1, w_variance], Original ATen: [aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # pow_1 => pow_1 # sub => sub # w_variance => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3]), kwargs = {}) triton_per_fused_pow_sub_sum_1 = async_compile.triton('triton_per_fused_pow_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_sub_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex % 3 r2 = (rindex // 3) x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + r1 + (4*r2) + (64*x0)), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1 + (4*r2) + (64*x0)), rmask & xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 
1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5p/c5pqv77fzadbchttlp3bzs45ceju3pmpek4icseimtay4keno3bi.py # Topologically Sorted Source Nodes: [add, score, score_1], Original ATen: [aten.add, aten.sqrt, aten.mean] # Source node to ATen node mapping: # add => add # score => sqrt # score_1 => mean # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sqrt, [0]), kwargs = {}) triton_per_fused_add_mean_sqrt_2 = async_compile.triton('triton_per_fused_add_mean_sqrt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sqrt_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_sqrt_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 + tmp1 tmp3 = libdevice.sqrt(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub_1, pow_2, h_variance], Original ATen: [aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_pow_sub_sum_0.run(arg0_1, buf0, 4, 48, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, ), (1, ), 
torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, w_variance], Original ATen: [aten.sub, aten.pow, aten.sum] triton_per_fused_pow_sub_sum_1.run(arg0_1, buf1, 4, 48, grid=grid(4), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [add, score, score_1], Original ATen: [aten.add, aten.sqrt, aten.mean] triton_per_fused_add_mean_sqrt_2.run(buf3, buf0, buf1, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from typing import Tuple
from torch.nn.modules.loss import _Loss
from typing import List
from typing import Optional


def _reduce(x: 'torch.Tensor', reduction: 'str'='mean') ->torch.Tensor:
    """Reduce input in batch dimension if needed.

    Args:
        x: Tensor with shape (N, *).
        reduction: Specifies the reduction type:
            ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
    """
    if reduction == 'none':
        return x
    elif reduction == 'mean':
        return x.mean(dim=0)
    elif reduction == 'sum':
        return x.sum(dim=0)
    else:
        raise ValueError(
            "Unknown reduction. Expected one of {'none', 'mean', 'sum'}")


def _validate_input(tensors: 'List[torch.Tensor]', dim_range:
        'Tuple[int, int]'=(0, -1), data_range: 'Tuple[float, float]'=(0.0,
        -1.0), size_range: 'Optional[Tuple[int, int]]'=None) ->None:
    """Check that input(-s) satisfies the requirements

    Args:
        tensors: Tensors to check
        dim_range: Allowed number of dimensions. (min, max)
        data_range: Allowed range of values in tensors. (min, max)
        size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1)
    """
    if not __debug__:
        return
    x = tensors[0]
    for t in tensors:
        assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}'
        assert t.device == x.device, (
            f'Expected tensors to be on {x.device}, got {t.device}')
        if size_range is None:
            assert t.size() == x.size(), (
                f'Expected tensors with same size, got {t.size()} and {x.size()}')
        else:
            assert t.size()[size_range[0]:size_range[1]] == x.size()[
                size_range[0]:size_range[1]], (
                f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}')
        if dim_range[0] == dim_range[1]:
            assert t.dim() == dim_range[0], (
                f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}')
        elif dim_range[0] < dim_range[1]:
            assert dim_range[0] <= t.dim() <= dim_range[1], (
                f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}')
        if data_range[0] < data_range[1]:
            assert data_range[0] <= t.min(), (
                f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}')
            assert t.max() <= data_range[1], (
                f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}')


def total_variation(x: 'torch.Tensor', reduction: 'str'='mean', norm_type:
        'str'='l2') ->torch.Tensor:
    """Compute Total Variation metric

    Args:
        x: Tensor. Shape :math:`(N, C, H, W)`.
        reduction: Specifies the reduction type:
            ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
        norm_type: {'l1', 'l2', 'l2_squared'}, defines which type of norm to
            implement, isotropic or anisotropic.

    Returns:
        score : Total variation of a given tensor

    References:
        https://www.wikiwand.com/en/Total_variation_denoising
        https://remi.flamary.com/demos/proxtv.html
    """
    _validate_input([x], dim_range=(4, 4), data_range=(0, -1))
    if norm_type == 'l1':
        w_variance = torch.sum(torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1]),
            dim=[1, 2, 3])
        h_variance = torch.sum(torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :]),
            dim=[1, 2, 3])
        score = h_variance + w_variance
    elif norm_type == 'l2':
        w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1],
            2), dim=[1, 2, 3])
        h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :],
            2), dim=[1, 2, 3])
        score = torch.sqrt(h_variance + w_variance)
    elif norm_type == 'l2_squared':
        w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1],
            2), dim=[1, 2, 3])
        h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :],
            2), dim=[1, 2, 3])
        score = h_variance + w_variance
    else:
        raise ValueError(
            "Incorrect norm type, should be one of {'l1', 'l2', 'l2_squared'}")
    return _reduce(score, reduction)


class TVLoss(_Loss):
    """Creates a criterion that measures the total variation of the given
    input :math:`x`.

    If :attr:`norm_type` set to ``'l2'`` the loss can be described as:

    .. math::
        TV(x) = \\sum_{N}\\sqrt{\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}|^2
        + |x_{:, :, i, j+1} - x_{:, :, i, j}|^2)}

    Else if :attr:`norm_type` set to ``'l1'``:

    .. math::
        TV(x) = \\sum_{N}\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}| +
        |x_{:, :, i, j+1} - x_{:, :, i, j}|)

    where :math:`N` is the batch size, `C` is the channel size.

    Args:
        norm_type: one of {'l1', 'l2', 'l2_squared'}
        reduction: Specifies the reduction type:
            ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``

    Examples::
        >>> loss = TVLoss()
        >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)
        >>> output = loss(x)
        >>> output.backward()

    References:
        https://www.wikiwand.com/en/Total_variation_denoising
        https://remi.flamary.com/demos/proxtv.html
    """

    def __init__(self, norm_type: 'str'='l2', reduction: 'str'='mean'):
        super().__init__()
        self.norm_type = norm_type
        self.reduction = reduction

    def forward(self, x: 'torch.Tensor') ->torch.Tensor:
        """Computation of Total Variation (TV) index as a loss function.

        Args:
            x: An input tensor. Shape :math:`(N, C, H, W)`.

        Returns:
            Value of TV loss to be minimized.
        """
        score = total_variation(x, reduction=self.reduction,
            norm_type=self.norm_type)
        return score


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
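# Usage sketch (not part of the original source; follows the Examples section
# of the docstring, with the (4, 4, 4, 4) input from get_inputs()). Under
# reduction='mean' the l2 TV score is a scalar and is differentiable.
if __name__ == '__main__':
    loss = TVLoss(norm_type='l2', reduction='mean')
    x = torch.rand(4, 4, 4, 4, requires_grad=True)
    score = loss(x)
    score.backward()
    assert score.dim() == 0 and x.grad is not None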
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from typing import Tuple from torch.nn.modules.loss import _Loss from typing import List from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex % 12 r2 = rindex // 12 x0 = xindex tmp0 = tl.load(in_ptr0 + (4 + r1 + 16 * r2 + 64 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1 + 16 * r2 + 64 * x0), rmask & xmask, other=0.0 ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_per_fused_pow_sub_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex % 3 r2 = rindex // 3 x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + r1 + 4 * r2 + 64 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1 + 4 * r2 + 64 * x0), rmask & xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_per_fused_add_mean_sqrt_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp3 = libdevice.sqrt(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_pow_sub_sum_0[grid(4)](arg0_1, buf0, 4, 48, XBLOCK =1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_pow_sub_sum_1[grid(4)](arg0_1, buf1, 4, 48, XBLOCK =1, num_warps=2, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_mean_sqrt_2[grid(1)](buf3, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, def _reduce(x: 'torch.Tensor', reduction: 'str'='mean') ->torch.Tensor: """Reduce input in batch dimension if needed. Args: x: Tensor with shape (N, *). reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. 
            Default: ``'mean'``
    """
    if reduction == 'none':
        return x
    elif reduction == 'mean':
        return x.mean(dim=0)
    elif reduction == 'sum':
        return x.sum(dim=0)
    else:
        raise ValueError(
            "Unknown reduction. Expected one of {'none', 'mean', 'sum'}")


def _validate_input(tensors: 'List[torch.Tensor]', dim_range:
        'Tuple[int, int]'=(0, -1), data_range: 'Tuple[float, float]'=(0.0,
        -1.0), size_range: 'Optional[Tuple[int, int]]'=None) ->None:
    """Check that input(-s) satisfies the requirements

    Args:
        tensors: Tensors to check
        dim_range: Allowed number of dimensions. (min, max)
        data_range: Allowed range of values in tensors. (min, max)
        size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1)
    """
    if not __debug__:
        return
    x = tensors[0]
    for t in tensors:
        assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}'
        assert t.device == x.device, (
            f'Expected tensors to be on {x.device}, got {t.device}')
        if size_range is None:
            assert t.size() == x.size(), (
                f'Expected tensors with same size, got {t.size()} and {x.size()}')
        else:
            assert t.size()[size_range[0]:size_range[1]] == x.size()[
                size_range[0]:size_range[1]], (
                f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}')
        if dim_range[0] == dim_range[1]:
            assert t.dim() == dim_range[0], (
                f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}')
        elif dim_range[0] < dim_range[1]:
            assert dim_range[0] <= t.dim() <= dim_range[1], (
                f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}')
        if data_range[0] < data_range[1]:
            assert data_range[0] <= t.min(), (
                f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}')
            assert t.max() <= data_range[1], (
                f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}')


def total_variation(x: 'torch.Tensor', reduction: 'str'='mean', norm_type:
        'str'='l2') ->torch.Tensor:
    """Compute Total Variation metric

    Args:
        x: Tensor. Shape :math:`(N, C, H, W)`.
        reduction: Specifies the reduction type:
            ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
        norm_type: {'l1', 'l2', 'l2_squared'}, defines which type of norm to
            implement, isotropic or anisotropic.

    Returns:
        score : Total variation of a given tensor

    References:
        https://www.wikiwand.com/en/Total_variation_denoising
        https://remi.flamary.com/demos/proxtv.html
    """
    _validate_input([x], dim_range=(4, 4), data_range=(0, -1))
    if norm_type == 'l1':
        w_variance = torch.sum(torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1]),
            dim=[1, 2, 3])
        h_variance = torch.sum(torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :]),
            dim=[1, 2, 3])
        score = h_variance + w_variance
    elif norm_type == 'l2':
        w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1],
            2), dim=[1, 2, 3])
        h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :],
            2), dim=[1, 2, 3])
        score = torch.sqrt(h_variance + w_variance)
    elif norm_type == 'l2_squared':
        w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1],
            2), dim=[1, 2, 3])
        h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :],
            2), dim=[1, 2, 3])
        score = h_variance + w_variance
    else:
        raise ValueError(
            "Incorrect norm type, should be one of {'l1', 'l2', 'l2_squared'}")
    return _reduce(score, reduction)


class TVLossNew(_Loss):
    """Creates a criterion that measures the total variation of the given
    input :math:`x`.

    If :attr:`norm_type` set to ``'l2'`` the loss can be described as:

    .. math::
        TV(x) = \\sum_{N}\\sqrt{\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}|^2
        + |x_{:, :, i, j+1} - x_{:, :, i, j}|^2)}

    Else if :attr:`norm_type` set to ``'l1'``:

    .. math::
        TV(x) = \\sum_{N}\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}| +
        |x_{:, :, i, j+1} - x_{:, :, i, j}|)

    where :math:`N` is the batch size, `C` is the channel size.

    Args:
        norm_type: one of {'l1', 'l2', 'l2_squared'}
        reduction: Specifies the reduction type:
            ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``

    Examples::
        >>> loss = TVLoss()
        >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)
        >>> output = loss(x)
        >>> output.backward()

    References:
        https://www.wikiwand.com/en/Total_variation_denoising
        https://remi.flamary.com/demos/proxtv.html
    """

    def __init__(self, norm_type: 'str'='l2', reduction: 'str'='mean'):
        super().__init__()
        self.norm_type = norm_type
        self.reduction = reduction

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
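# Hypothetical parity check (not in the original file; assumes the eager
# TVLoss reference above is importable and CUDA is available). The fused
# kernels hard-code the traced configuration (norm_type='l2',
# reduction='mean', contiguous (4, 4, 4, 4) input), so they should match the
# eager loss for exactly that case.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = TVLoss()(x)
    out = TVLossNew()(x)
    torch.testing.assert_close(out, ref)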
hecoding/piq
TVLoss
false
10,188
[ "Apache-2.0" ]
0
c72143ce9deb30fefaca434a39e4dfc557673e97
https://github.com/hecoding/piq/tree/c72143ce9deb30fefaca434a39e4dfc557673e97
BCEDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/z6/cz6ehk6udjuldkbvdykpkjp4ihcvsvw26c57rsotg2zyo22imkez.py # Topologically Sorted Source Nodes: [bce], Original ATen: [aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # bce => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': 
{0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zs/czsfocxocrg5tbsgfmzmhomswsg7jp4tsfc6o6cysqu2g7ckglyt.py # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => mul_1 # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) triton_per_fused_mul_sum_1 = async_compile.triton('triton_per_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) tl.store(out_ptr2 + (x0), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2f/c2fvdreglrickmt4nxtlq5l7mdvbwbzqliwqtp2oyxpt7isnykti.py # Topologically Sorted Source Nodes: [bce, mul_2, mul_1, add, add_1, add_2, dice, sum_4, truediv_1, dice_1, add_3], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add, aten.div, aten.sum, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # bce => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # dice => div # dice_1 => sub_3 # mul_1 => mul_2 # mul_2 => mul_3 # sum_4 => sum_4 # truediv_1 => div_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs 
= {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1e-05), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %sub_3), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp13 = tl.load(in_out_ptr0 + (0)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = 2.0 
tmp2 = tmp0 * tmp1 tmp3 = 1e-05 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = 0.25 tmp20 = tmp12 * tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tmp23 = tmp18 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [bce], Original ATen: [aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] triton_per_fused_mul_sum_1.run(arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf5 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [bce, mul_2, mul_1, add, add_1, add_2, dice, sum_4, truediv_1, dice_1, add_3], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add, aten.div, aten.sum, aten.rsub] triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2.run(buf5, buf1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0) del buf1 del buf2 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional as F


class BCEDiceLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-05
        input = torch.sigmoid(input)
        num = target.size(0)
        input = input.view(num, -1)
        target = target.view(num, -1)
        intersection = input * target
        dice = (2.0 * intersection.sum(1) + smooth) / (input.sum(1) +
            target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
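The forward pass combines two terms: a mean binary cross-entropy over the logits and a soft Dice score computed per sample on the sigmoid probabilities, weighted as 0.5 * BCE + (1 - mean Dice). A minimal eager-mode sketch that recomputes both terms by hand to confirm the weighting (it assumes only the BCEDiceLoss module above is in scope):

import torch
import torch.nn.functional as F

logits, target = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])
loss = BCEDiceLoss()(logits, target)

# Recompute the two terms separately, mirroring the module body.
bce = F.binary_cross_entropy_with_logits(logits, target)
p, t = torch.sigmoid(logits).view(4, -1), target.view(4, -1)
dice = (2.0 * (p * t).sum(1) + 1e-05) / (p.sum(1) + t.sum(1) + 1e-05)
assert torch.allclose(loss, 0.5 * bce + (1 - dice.sum() / 4))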
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# Elementwise binary cross-entropy with logits, reduced to a single sum over
# all 256 elements; the division by 256 happens in the final combine kernel.
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1,
    out_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)


# Per-sample Dice statistics: row sums of sigmoid(in_ptr0) * in_ptr1
# (intersection), of sigmoid(in_ptr0), and of in_ptr1, one row per sample.
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp10 = tl.where(xmask, tmp8, 0)
    tmp11 = tl.sum(tmp10, 1)[:, None]
    tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp14 = tl.where(xmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp7, xmask)
    tl.store(out_ptr1 + x0, tmp11, xmask)
    tl.store(out_ptr2 + x0, tmp15, xmask)


# Final combine, written in place over the BCE sum:
# 0.5 * (bce_sum / 256) + (1 - mean((2*intersection + 1e-05) / (p_sum + t_sum + 1e-05))).
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp5 = tl.load(in_ptr1 + r0, None)
    tmp6 = tl.load(in_ptr2 + r0, None)
    tmp13 = tl.load(in_out_ptr0 + 0)
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1])
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp3 = 1e-05
    tmp4 = tmp2 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp7 + tmp3
    tmp9 = tmp4 / tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.sum(tmp10, 1)[:, None]
    tmp15 = 256.0
    tmp16 = tmp14 / tmp15
    tmp17 = 0.5
    tmp18 = tmp16 * tmp17
    tmp19 = 0.25
    tmp20 = tmp12 * tmp19
    tmp21 = 1.0
    tmp22 = tmp21 - tmp20
    tmp23 = tmp18 + tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 holds the BCE sum; buf1..buf3 hold the per-sample Dice sums.
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1,
            arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        buf3 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2,
            buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf5 = buf0  # reuse buf0's storage for the final scalar loss
        del buf0
        triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2[
            grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2,
            num_stages=1)
        del buf1
        del buf2
        del buf3
    return buf5,


class BCEDiceLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
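A hedged sanity check (it requires a CUDA device and assumes the eager BCEDiceLoss from the reference code above is also in scope): the Inductor-generated wrapper is specialized to the traced (4, 4, 4, 4) contiguous inputs and should reproduce the eager loss on them.

if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = torch.rand([4, 4, 4, 4], device='cuda')
    eager = BCEDiceLoss()(x, y)
    fused = BCEDiceLossNew()(x, y)
    assert torch.allclose(eager, fused, atol=1e-06)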
ha55anali/pytorch-nested-unet
BCEDiceLoss
false
10189
[ "MIT" ]
0
444dbd0ff7764478de662723b211c23bd65d99f9
https://github.com/ha55anali/pytorch-nested-unet/tree/444dbd0ff7764478de662723b211c23bd65d99f9
ProtoLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5e/c5eafxmsumwasri42awhrxtuwh7n26l2jrvvlbn4dwngizcdu3oc.py # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, dists], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] # Source node to ATen node mapping: # dists => mul_1 # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %view_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 1.0), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, -1), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp18 = tmp13 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tmp16 + tmp19 tmp22 = tmp13 - tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp20 + tmp23 tmp26 = tmp13 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tmp28 * tmp1 tmp30 = -1.0 tmp31 = tmp29 * tmp30 tl.store(out_ptr0 + (x3), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lu/cluxxjgnjqhuzfbvue2s2lxnuwx66cxm7q4ggcftpbj7v75siz7q.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [0], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/rx/crxklediwv3chf4tyunfqyfvkii4ho2fuf2sz5thet7wxwtb7zn5.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_2, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bf/cbflxjeewar2iqpe4k6fimdx2tm65t5h2iil4ipnvnu3jxqfkrg5.py # Topologically Sorted Source Nodes: [mul_3, mean_1, mean_2, ce_loss], Original ATen: [aten.mul, aten.mean] # Source node to ATen node mapping: # ce_loss => mul_4 # mean_1 => mean_1 # mean_2 => mean_2 # mul_3 => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %arg2_1), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_3, [1]), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mean_1,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, -1.0), kwargs = {}) triton_per_fused_mean_mul_3 = async_compile.triton('triton_per_fused_mean_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = (rindex // 4) % 4 r2 = (rindex // 16) r3 = rindex % 16 tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r3 + (64*r2)), None) tmp3 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + r3 + (64*r2)), None) tmp7 = tl.load(in_ptr0 + (8 + r0 + (16*r1)), None, eviction_policy='evict_last') tmp8 = 
tl.load(in_ptr1 + (32 + r3 + (64*r2)), None) tmp11 = tl.load(in_ptr0 + (12 + r0 + (16*r1)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + r3 + (64*r2)), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = 64.0 tmp21 = tmp19 / tmp20 tmp22 = -1.0 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/m2/cm2aauuskdmjlbw2u5d6z6p5brkmwpn6v3w4bxiopedlrtw5dulv.py # Topologically Sorted Source Nodes: [argmax_1], Original ATen: [aten.argmax] # Source node to ATen node mapping: # argmax_1 => argmax_1 # Graph fragment: # %argmax_1 : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%permute, -1), kwargs = {}) triton_poi_fused_argmax_4 = async_compile.triton('triton_poi_fused_argmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_argmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) 
tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x0), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kq/ckqtiy3ut254tqkzvtho5vp5wzgldspn7dcaw3fi3vwhzip6mv37.py # Topologically Sorted Source Nodes: [argmax, float_3, float_4, eq, float_5, accuracy], Original ATen: [aten.argmax, aten._to_copy, aten.eq, aten.mean] # Source node to ATen node mapping: # accuracy => mean_3 # argmax => argmax # eq => eq # float_3 => convert_element_type # float_4 => convert_element_type_1 # float_5 => convert_element_type_2 # Graph fragment: # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg2_1, -1), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%argmax, torch.float32), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%argmax_1, torch.float32), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%convert_element_type, %convert_element_type_1), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%convert_element_type_2,), kwargs = {}) triton_per_fused__to_copy_argmax_eq_mean_5 = async_compile.triton('triton_per_fused__to_copy_argmax_eq_mean_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_argmax_eq_mean_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_argmax_eq_mean_5(in_out_ptr0, in_ptr0, in_ptr1, 
xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 4 r2 = (rindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr1 + (r2 + (4*r1)), None, eviction_policy='evict_last') tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1, 1], 0, tl.int64) tmp11 = tl.full([1, 1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1, 1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tmp46.to(tl.float32) tmp49 = tmp48.to(tl.float32) tmp50 = tmp47 == tmp49 tmp51 = tmp50.to(tl.float32) tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK]) tmp54 = tl.sum(tmp52, 1)[:, None] tmp55 = 64.0 tmp56 = tmp54 / tmp55 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp56, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, dists], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf1, buf2, 64, grid=grid(64), stream=stream0) del buf1 buf3 = empty_strided_cuda((), (), torch.float32) buf7 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul_3, mean_1, mean_2, ce_loss], Original ATen: [aten.mul, aten.mean] triton_per_fused_mean_mul_3.run(buf7, buf2, arg2_1, 1, 64, grid=grid(1), stream=stream0) buf5 = empty_strided_cuda((4, 4), (1, 4), 
torch.int64) # Topologically Sorted Source Nodes: [argmax_1], Original ATen: [aten.argmax] triton_poi_fused_argmax_4.run(buf2, buf5, 16, grid=grid(16), stream=stream0) del buf2 buf6 = empty_strided_cuda((), (), torch.float32) buf8 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [argmax, float_3, float_4, eq, float_5, accuracy], Original ATen: [aten.argmax, aten._to_copy, aten.eq, aten.mean] triton_per_fused__to_copy_argmax_eq_mean_5.run(buf8, arg2_1, buf5, 1, 64, grid=grid(1), stream=stream0) del arg2_1 del buf5 return (buf7, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class ProtoLoss(torch.nn.Module):

    def __init__(self, num_classes, num_support, num_queries, ndim):
        super(ProtoLoss, self).__init__()
        self.num_classes = num_classes
        self.num_support = num_support
        self.num_queries = num_queries
        self.ndim = ndim

    def euclidean_distance(self, a, b):
        N, _D = a.shape[0], a.shape[1]
        M = b.shape[0]
        a = torch.repeat_interleave(a.unsqueeze(1), repeats=M, dim=1)
        b = torch.repeat_interleave(b.unsqueeze(0), repeats=N, dim=0)
        return 1.0 * torch.sum(torch.pow(a - b, 2), 2)

    def forward(self, x, q, labels_onehot):
        protox = torch.mean(1.0 * x.reshape([self.num_classes,
            self.num_support, self.ndim]), 1)
        dists = self.euclidean_distance(protox, q)
        logpy = torch.log_softmax(-1.0 * dists, 0).transpose(1, 0).view(
            self.num_classes, self.num_queries, self.num_classes)
        ce_loss = -1.0 * torch.mean(torch.mean(logpy *
            labels_onehot.float(), 1))
        accuracy = torch.mean((torch.argmax(labels_onehot.float(), -1).
            float() == torch.argmax(logpy, -1).float()).float())
        return ce_loss, accuracy


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
        4, 4])]


def get_init_inputs():
    return [[], {'num_classes': 4, 'num_support': 4, 'num_queries': 4,
        'ndim': 4}]
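ProtoLoss implements a prototypical-network episode loss: each class prototype is the mean of that class's support embeddings, queries are scored by negative squared Euclidean distance to every prototype, and a log-softmax over classes yields both a cross-entropy term and an argmax accuracy. A minimal eager-mode sketch of one episode, using the shapes from get_inputs/get_init_inputs above (the random labels_onehot is only a placeholder, exactly as in the test harness):

import torch

loss_fn = ProtoLoss(num_classes=4, num_support=4, num_queries=4, ndim=4)
x = torch.rand([4, 4, 4])                 # support embeddings, reshaped to (classes, support, ndim)
q = torch.rand([4, 4, 4])                 # query embeddings
labels_onehot = torch.rand([4, 4, 4, 4])  # placeholder labels, as in get_inputs()
ce_loss, accuracy = loss_fn(x, q, labels_onehot)
print(float(ce_loss), float(accuracy))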
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# Fuses the prototype mean (over the 4 support rows) with the squared-distance
# reduction and the final -1 scaling: out = -sum((mean_support - q)^2).
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp1
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp1
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp15 = tmp13 - tmp14
    tmp16 = tmp15 * tmp15
    tmp18 = tmp13 - tmp17
    tmp19 = tmp18 * tmp18
    tmp20 = tmp16 + tmp19
    tmp22 = tmp13 - tmp21
    tmp23 = tmp22 * tmp22
    tmp24 = tmp20 + tmp23
    tmp26 = tmp13 - tmp25
    tmp27 = tmp26 * tmp26
    tmp28 = tmp24 + tmp27
    tmp29 = tmp28 * tmp1
    tmp30 = -1.0
    tmp31 = tmp29 * tmp30
    tl.store(out_ptr0 + x3, tmp31, xmask)


# Numerically stable log-softmax over dim 0, step 1: subtract the running
# max across the four class slices.
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


# log-softmax, step 2: subtract log(sum(exp(...))) over the same class slices.
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


# Cross-entropy term: mean over queries, then over all elements, of
# logpy * labels_onehot, negated; reduced to a scalar in place.
@triton.jit
def triton_per_fused_mean_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 4
    r1 = rindex // 4 % 4
    r2 = rindex // 16
    r3 = rindex % 16
    tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (r3 + 64 * r2), None)
    tmp3 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + r3 + 64 * r2), None)
    tmp7 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None)
    tmp11 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None)
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.sum(tmp17, 1)[:, None]
    tmp20 = 64.0
    tmp21 = tmp19 / tmp20
    tmp22 = -1.0
    tmp23 = tmp21 * tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)


# argmax of logpy over the last dimension, via Inductor's NaN-aware,
# lowest-index tie-breaking comparison chain.
@triton.jit
def triton_poi_fused_argmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 > tmp1
    tmp3 = tmp0 == tmp1
    tmp4 = tmp0 != tmp0
    tmp5 = tmp1 != tmp1
    tmp6 = tmp4 > tmp5
    tmp7 = tmp2 | tmp6
    tmp8 = tmp4 & tmp5
    tmp9 = tmp3 | tmp8
    tmp10 = tl.full([1], 0, tl.int64)
    tmp11 = tl.full([1], 1, tl.int64)
    tmp12 = tmp10 < tmp11
    tmp13 = tmp9 & tmp12
    tmp14 = tmp7 | tmp13
    tmp15 = tl.where(tmp14, tmp0, tmp1)
    tmp16 = tl.where(tmp14, tmp10, tmp11)
    tmp18 = tmp15 > tmp17
    tmp19 = tmp15 == tmp17
    tmp20 = tmp15 != tmp15
    tmp21 = tmp17 != tmp17
    tmp22 = tmp20 > tmp21
    tmp23 = tmp18 | tmp22
    tmp24 = tmp20 & tmp21
    tmp25 = tmp19 | tmp24
    tmp26 = tl.full([1], 2, tl.int64)
    tmp27 = tmp16 < tmp26
    tmp28 = tmp25 & tmp27
    tmp29 = tmp23 | tmp28
    tmp30 = tl.where(tmp29, tmp15, tmp17)
    tmp31 = tl.where(tmp29, tmp16, tmp26)
    tmp33 = tmp30 > tmp32
    tmp34 = tmp30 == tmp32
    tmp35 = tmp30 != tmp30
    tmp36 = tmp32 != tmp32
    tmp37 = tmp35 > tmp36
    tmp38 = tmp33 | tmp37
    tmp39 = tmp35 & tmp36
    tmp40 = tmp34 | tmp39
    tmp41 = tl.full([1], 3, tl.int64)
    tmp42 = tmp31 < tmp41
    tmp43 = tmp40 & tmp42
    tmp44 = tmp38 | tmp43
    tl.where(tmp44, tmp30, tmp32)
    tmp46 = tl.where(tmp44, tmp31, tmp41)
    tl.store(out_ptr0 + x0, tmp46, xmask)


# Accuracy: recompute argmax of labels_onehot inline (same comparison chain),
# compare with the predicted argmax, and take the mean over all 64 queries.
@triton.jit
def triton_per_fused__to_copy_argmax_eq_mean_5(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    r1 = rindex % 4
    r2 = rindex // 4 % 4
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr1 + (r2 + 4 * r1), None, eviction_policy='evict_last')
    tmp2 = tmp0 > tmp1
    tmp3 = tmp0 == tmp1
    tmp4 = tmp0 != tmp0
    tmp5 = tmp1 != tmp1
    tmp6 = tmp4 > tmp5
    tmp7 = tmp2 | tmp6
    tmp8 = tmp4 & tmp5
    tmp9 = tmp3 | tmp8
    tmp10 = tl.full([1, 1], 0, tl.int64)
    tmp11 = tl.full([1, 1], 1, tl.int64)
    tmp12 = tmp10 < tmp11
    tmp13 = tmp9 & tmp12
    tmp14 = tmp7 | tmp13
    tmp15 = tl.where(tmp14, tmp0, tmp1)
    tmp16 = tl.where(tmp14, tmp10, tmp11)
    tmp18 = tmp15 > tmp17
    tmp19 = tmp15 == tmp17
    tmp20 = tmp15 != tmp15
    tmp21 = tmp17 != tmp17
    tmp22 = tmp20 > tmp21
    tmp23 = tmp18 | tmp22
    tmp24 = tmp20 & tmp21
    tmp25 = tmp19 | tmp24
    tmp26 = tl.full([1, 1], 2, tl.int64)
    tmp27 = tmp16 < tmp26
    tmp28 = tmp25 & tmp27
    tmp29 = tmp23 | tmp28
    tmp30 = tl.where(tmp29, tmp15, tmp17)
    tmp31 = tl.where(tmp29, tmp16, tmp26)
    tmp33 = tmp30 > tmp32
    tmp34 = tmp30 == tmp32
    tmp35 = tmp30 != tmp30
    tmp36 = tmp32 != tmp32
    tmp37 = tmp35 > tmp36
    tmp38 = tmp33 | tmp37
    tmp39 = tmp35 & tmp36
    tmp40 = tmp34 | tmp39
    tmp41 = tl.full([1, 1], 3, tl.int64)
    tmp42 = tmp31 < tmp41
    tmp43 = tmp40 & tmp42
    tmp44 = tmp38 | tmp43
    tl.where(tmp44, tmp30, tmp32)
    tmp46 = tl.where(tmp44, tmp31, tmp41)
    tmp47 = tmp46.to(tl.float32)
    tmp49 = tmp48.to(tl.float32)
    tmp50 = tmp47 == tmp49
    tmp51 = tmp50.to(tl.float32)
    tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK])
    tmp54 = tl.sum(tmp52, 1)[:, None]
    tmp55 = 64.0
    tmp56 = tmp54 / tmp55
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: negative squared distances; buf2 reuses it for logpy;
        # buf7 and buf8 hold the scalar loss and accuracy.
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__log_softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf3
        del buf3
        triton_per_fused_mean_mul_3[grid(1)](buf7, buf2, arg2_1, 1, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        buf5 = empty_strided_cuda((4, 4), (1, 4), torch.int64)
        triton_poi_fused_argmax_4[grid(16)](buf2, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf2
        buf6 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf6
        del buf6
        triton_per_fused__to_copy_argmax_eq_mean_5[grid(1)](buf8, arg2_1,
            buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg2_1
        del buf5
    return buf7, buf8


class ProtoLossNew(torch.nn.Module):

    def __init__(self, num_classes, num_support, num_queries, ndim):
        super(ProtoLossNew, self).__init__()
        self.num_classes = num_classes
        self.num_support = num_support
        self.num_queries = num_queries
        self.ndim = ndim

    def euclidean_distance(self, a, b):
        N, _D = a.shape[0], a.shape[1]
        M = b.shape[0]
        a = torch.repeat_interleave(a.unsqueeze(1), repeats=M, dim=1)
        b = torch.repeat_interleave(b.unsqueeze(0), repeats=N, dim=0)
        return 1.0 * torch.sum(torch.pow(a - b, 2), 2)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1]
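A hedged sanity check (it requires a CUDA device and assumes both the eager ProtoLoss above and the compiled ProtoLossNew are in scope): the fused kernels are specialized to the traced shapes and should match eager mode on them. Note the accuracy comparison relies on argmax tie-breaking, which is effectively never exercised with continuous random inputs.

if torch.cuda.is_available():
    x = torch.rand([4, 4, 4], device='cuda')
    q = torch.rand([4, 4, 4], device='cuda')
    labels = torch.rand([4, 4, 4, 4], device='cuda')
    eager_loss, eager_acc = ProtoLoss(4, 4, 4, 4)(x, q, labels)
    fused_loss, fused_acc = ProtoLossNew(4, 4, 4, 4)(x, q, labels)
    assert torch.allclose(eager_loss, fused_loss, atol=1e-06)
    assert torch.allclose(eager_acc, fused_acc)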
gradjitta/Prototypical-Networks
ProtoLoss
false
10191
[ "MIT" ]
0
9ec344f7299353889e2087224b80a74519ca1a3c
https://github.com/gradjitta/Prototypical-Networks/tree/9ec344f7299353889e2087224b80a74519ca1a3c