Dataset schema (per-field type and observed length/value range):

Field                  Type             Min     Max
entry_point            stringlengths    1       65
original_triton_code   stringlengths    4.5k    619k
python_code            stringlengths    208     60.9k
triton_code            stringlengths    1.15k   275k
repo_name              stringlengths    7       115
module_name            stringlengths    1       65
synthetic              bool             1 class
uuid                   int64            0       18.5k
licenses               sequencelengths  1       6
stars                  int64            0       19.8k
sha                    stringlengths    40      40
repo_link              stringlengths    72      180
pytorch_code           stringlengths    200     4.05k
entry_point: NormedLinear
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/c4/cc4mo2hxcdeykamlqh3edsugtiwa5qgnvsckmhygtvamwp2crsqf.py
# Topologically Sorted Source Nodes: [norm_1, pow_2, add_1, x_, x__1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
#   add_1 => add_1
#   norm_1 => pow_4, pow_5, sum_2
#   pow_2 => pow_6
#   x_ => div_1
#   x__1 => mul
# Graph fragment:
#   %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [1], True), kwargs = {})
#   %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
#   %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_5, 1.0), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_6, 1e-06), kwargs = {})
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %add_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 20), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tmp16 = 20.0
    tmp17 = tmp15 * tmp16
    tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/gr/cgrujw4uhohajox4hmoi742gbs4qqn5s46hnwwj2orhjgu6tkgrn.py
# Topologically Sorted Source Nodes: [norm, pow_1, add, weight_], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div]
# Source node to ATen node mapping:
#   add => add
#   norm => pow_1, pow_2, sum_1
#   pow_1 => pow_3
#   weight_ => div
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 1.0), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, 1e-06), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_pow_1 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [norm_1, pow_2, add_1, x_, x__1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [norm, pow_1, add, weight_], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div]
        triton_poi_fused_add_div_linalg_vector_norm_pow_1.run(primals_1, buf1, 16, grid=grid(16), stream=stream0)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf1
        del primals_3
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn.functional as F
from torch import nn
import torch.onnx


class NormedLinear(nn.Linear):
    """Normalized Linear Layer.

    Args:
        temperature (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
        eps (float, optional): Minimal value added to the divisor to keep
            numerical stability. Defaults to 1e-6.
    """

    def __init__(self, *args, temperature=20, power=1.0, eps=1e-06, **kwargs):
        super(NormedLinear, self).__init__(*args, **kwargs)
        self.temperature = temperature
        self.power = power
        self.eps = eps
        self.init_weights()

    def init_weights(self):
        nn.init.normal_(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, x):
        weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).pow(
            self.power) + self.eps)
        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x_ * self.temperature
        return F.linear(x_, weight_, self.bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
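A minimal usage sketch, assuming the definitions above are in scope; the shapes are taken straight from get_inputs/get_init_inputs:

# Build the layer with the recorded init arguments and run the sample input.
init_args, init_kwargs = get_init_inputs()   # [], {'in_features': 4, 'out_features': 4}
layer = NormedLinear(*init_args, **init_kwargs)
x, = get_inputs()                            # (4, 4, 4, 4); dim=1 is the normalized axis
out = layer(x)                               # F.linear maps the last dim 4 -> 4
print(out.shape)                             # torch.Size([4, 4, 4, 4])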
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.onnx

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tmp16 = 20.0
    tmp17 = tmp15 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0[grid(256)](primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_div_linalg_vector_norm_pow_1[grid(16)](primals_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf1
        del primals_3
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)


class NormedLinearNew(nn.Linear):
    """Normalized Linear Layer.

    Args:
        temperature (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
        eps (float, optional): Minimal value added to the divisor to keep
            numerical stability. Defaults to 1e-6.
    """

    def __init__(self, *args, temperature=20, power=1.0, eps=1e-06, **kwargs):
        super(NormedLinearNew, self).__init__(*args, **kwargs)
        self.temperature = temperature
        self.power = power
        self.eps = eps
        self.init_weights()

    def init_weights(self):
        nn.init.normal_(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
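A minimal sketch of checking the compiled wrapper against the eager module, assuming torch and both class definitions above are in scope and a CUDA device is available; the tolerances are an assumption:

# Hedged equivalence check between NormedLinear (eager) and NormedLinearNew (call wrapper).
eager = NormedLinear(4, 4).cuda()
compiled = NormedLinearNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())   # same weight and bias in both modules
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)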
repo_name: ENOT-AutoDL/mmdetection-enot
module_name: NormedLinear
synthetic: false
uuid: 5111
licenses: ["Apache-2.0"]
stars: 1
sha: f541749554436e3327bac00eee89b84f66c03551
repo_link: https://github.com/ENOT-AutoDL/mmdetection-enot/tree/f541749554436e3327bac00eee89b84f66c03551
pytorch_code:

import torch
import torch.nn.functional as F
from torch import nn
import torch.onnx


class Model(nn.Linear):
    """Normalized Linear Layer.

    Args:
        temperature (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
        eps (float, optional): Minimal value added to the divisor to keep
            numerical stability. Defaults to 1e-6.
    """

    def __init__(self, *args, temperature=20, power=1.0, eps=1e-06, **kwargs):
        super().__init__(*args, **kwargs)
        self.temperature = temperature
        self.power = power
        self.eps = eps
        self.init_weights()

    def init_weights(self):
        nn.init.normal_(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, x):
        weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).pow(
            self.power) + self.eps)
        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x_ * self.temperature
        return F.linear(x_, weight_, self.bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
entry_point: WaveletConv
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/x5/cx57ndofhj4et5wnt2rbjkijbjsbca7tan7oleie57gahu7vejxk.py
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   h_2 => mul
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %mm_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2), xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 1), (1, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
        extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm]
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(primals_4, buf1, buf2, 16, grid=grid(16), stream=stream0)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [h_2, out], Original ATen: [aten.mul, aten.mm]
        extern_kernels.mm(primals_5, buf2, out=buf3)
        del buf2
    return (buf3, primals_2, primals_4, buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn


class WaveletConv(nn.Module):

    def __init__(self, in_features, out_features, num_nodes, bias=False):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.w = nn.Linear(in_features, out_features, bias=bias)
        self.filter = nn.Parameter(torch.ones(num_nodes, 1))

    def reset_parameters(self):
        self.w.reset_parameters()
        self.filter.data.fill_(1.0)

    def forward(self, x, wavelet, inverse_wavelet):
        h = self.w(x)
        h = inverse_wavelet.mm(h)
        h = self.filter * h
        out = wavelet.mm(h)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
        )


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4, 'num_nodes': 4}]
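A minimal usage sketch, assuming the definitions above are in scope; the two extra forward inputs stand in for the (here random) wavelet and inverse-wavelet bases:

conv = WaveletConv(in_features=4, out_features=4, num_nodes=4)
x, wavelet, inverse_wavelet = get_inputs()   # three (4, 4) tensors
out = conv(x, wavelet, inverse_wavelet)      # wavelet @ (filter * (inverse_wavelet @ (x @ W.T)))
print(out.shape)                             # torch.Size([4, 4])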
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 1), (1, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_4, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_5, buf2, out=buf3)
        del buf2
    return buf3, primals_2, primals_4, buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0)


class WaveletConvNew(nn.Module):

    def __init__(self, in_features, out_features, num_nodes, bias=False):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.w = nn.Linear(in_features, out_features, bias=bias)
        self.filter = nn.Parameter(torch.ones(num_nodes, 1))

    def reset_parameters(self):
        self.w.reset_parameters()
        self.filter.data.fill_(1.0)

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
        )

    def forward(self, input_0, input_1, input_2):
        primals_4 = self.filter
        primals_1 = self.w.weight
        primals_2 = input_0
        primals_3 = input_1
        primals_5 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: EdisonLeeeee/Graphgallery
module_name: WaveletConv
synthetic: false
uuid: 5112
licenses: ["MIT"]
stars: 1
sha: 8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
repo_link: https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
pytorch_code:

import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, in_features, out_features, num_nodes, bias=False):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.w = nn.Linear(in_features, out_features, bias=bias)
        self.filter = nn.Parameter(torch.ones(num_nodes, 1))

    def reset_parameters(self):
        self.w.reset_parameters()
        self.filter.data.fill_(1.0)

    def forward(self, x, wavelet, inverse_wavelet):
        h = self.w(x)
        h = inverse_wavelet.mm(h)
        h = self.filter * h
        out = wavelet.mm(h)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
        )


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4, 4]
entry_point: ASPP
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/t5/ct55rbjwmolbdezls7qwycmfcekpuopc26ttuokeobmlskp2w4b2.py
# Topologically Sorted Source Nodes: [image_features], Original ATen: [aten.mean]
# Source node to ATen node mapping:
#   image_features => mean
# Graph fragment:
#   %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1024, 16],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 256
    x1 = (xindex // 256)
    tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.store(out_ptr0 + (x0 + (256*r2) + (4096*x1)), tmp0, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/pi/cpiqq6ydqends5jgnpbakdwghpnovztoymnjrsnfd3ycpt6b7cp7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 65536
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/uo/cuoczpv3osizaqq5r7djdrk3fldwrcibmtcbqzk5o2fs5pbr52u4.py
# Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
#   image_features_2 => convert_element_type_1
# Graph fragment:
#   %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_2 = async_compile.triton('triton_poi_fused__to_copy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.full([1], 0, tl.int64)
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/gq/cgqf6jiptqimofa5qvg7kso74mk3e537skek2ruh7m7z6kgfasxu.py
# Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
# Source node to ATen node mapping:
#   image_features_2 => clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub
# Graph fragment:
#   %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
#   %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
#   %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
#   %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_arange_clamp_mul_sub_3 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/3d/c3dva26t2rnin44x3xvhfsvseu2j47gsmx2tsa7wrehixkbd4qyh.py
# Topologically Sorted Source Nodes: [image_features_1, image_features_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
#   image_features_1 => convolution
#   image_features_2 => _unsafe_index, _unsafe_index_1, add_2, mul_2, sub_1
# Graph fragment:
#   %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
#   %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 4) % 4
    x0 = xindex % 4
    x5 = (xindex // 16)
    x2 = (xindex // 16) % 256
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 1, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp11 = tmp9 + tmp10
    tmp13 = tmp12 + tmp1
    tmp14 = tmp12 < 0
    tmp15 = tl.where(tmp14, tmp13, tmp12)
    tmp16 = tmp11 - tmp11
    tmp18 = tmp16 * tmp17
    tmp19 = tmp11 + tmp18
    tl.store(out_ptr0 + (x6), tmp19, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/ph/cphhsnmh5fjmrfgacncrqujbyhvwgx634inbl7stypuoqtei7h6k.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_4, %convolution_1, %convolution_2, %convolution_3, %convolution_4], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 81920
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 1280
    x3 = (xindex // 20480)
    x4 = (xindex // 1280) % 16
    x2 = (xindex // 5120) % 4
    x1 = (xindex // 1280) % 4
    x5 = (xindex // 1280)
    x6 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 256, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x4 + (16*x0) + (4096*x3)), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tl.full([XBLOCK], 1, tl.int32)
    tmp8 = tmp6 + tmp7
    tmp9 = tmp6 < 0
    tmp10 = tl.where(tmp9, tmp8, tmp6)
    tmp11 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp11 + tmp7
    tmp13 = tmp11 < 0
    tmp14 = tl.where(tmp13, tmp12, tmp11)
    tmp15 = tl.load(in_ptr3 + ((256*x3) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.load(in_ptr4 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 + tmp16
    tmp18 = tl.load(in_ptr5 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp19 = tmp18 + tmp7
    tmp20 = tmp18 < 0
    tmp21 = tl.where(tmp20, tmp19, tmp18)
    tmp22 = tmp17 - tmp17
    tmp23 = tl.load(in_ptr6 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp24 = tmp22 * tmp23
    tmp25 = tmp17 + tmp24
    tmp26 = tmp25 - tmp5
    tmp27 = tl.load(in_ptr7 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
    tmp28 = tmp26 * tmp27
    tmp29 = tmp5 + tmp28
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp4, tmp29, tmp30)
    tmp32 = tmp0 >= tmp3
    tmp33 = tl.full([1], 512, tl.int64)
    tmp34 = tmp0 < tmp33
    tmp35 = tmp32 & tmp34
    tmp36 = tl.load(in_ptr8 + ((256*x5) + ((-256) + x0)), tmp35, eviction_policy='evict_last', other=0.0)
    tmp37 = tl.load(in_ptr9 + ((-256) + x0), tmp35, eviction_policy='evict_last', other=0.0)
    tmp38 = tmp36 + tmp37
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = tmp0 >= tmp33
    tmp42 = tl.full([1], 768, tl.int64)
    tmp43 = tmp0 < tmp42
    tmp44 = tmp41 & tmp43
    tmp45 = tl.load(in_ptr10 + ((256*x5) + ((-512) + x0)), tmp44, eviction_policy='evict_last', other=0.0)
    tmp46 = tl.load(in_ptr11 + ((-512) + x0), tmp44, eviction_policy='evict_last', other=0.0)
    tmp47 = tmp45 + tmp46
    tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
    tmp49 = tl.where(tmp44, tmp47, tmp48)
    tmp50 = tmp0 >= tmp42
    tmp51 = tl.full([1], 1024, tl.int64)
    tmp52 = tmp0 < tmp51
    tmp53 = tmp50 & tmp52
    tmp54 = tl.load(in_ptr12 + ((256*x5) + ((-768) + x0)), tmp53, eviction_policy='evict_last', other=0.0)
    tmp55 = tl.load(in_ptr13 + ((-768) + x0), tmp53, eviction_policy='evict_last', other=0.0)
    tmp56 = tmp54 + tmp55
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = tmp0 >= tmp51
    tmp60 = tl.full([1], 1280, tl.int64)
    tmp61 = tmp0 < tmp60
    tmp62 = tl.load(in_ptr14 + ((256*x5) + ((-1024) + x0)), tmp59, eviction_policy='evict_last', other=0.0)
    tmp63 = tl.load(in_ptr15 + ((-1024) + x0), tmp59, eviction_policy='evict_last', other=0.0)
    tmp64 = tmp62 + tmp63
    tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
    tmp66 = tl.where(tmp59, tmp64, tmp65)
    tmp67 = tl.where(tmp53, tmp58, tmp66)
    tmp68 = tl.where(tmp44, tmp49, tmp67)
    tmp69 = tl.where(tmp35, tmp40, tmp68)
    tmp70 = tl.where(tmp4, tmp31, tmp69)
    tl.store(out_ptr0 + (x6), tmp70, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/fs/cfs7gssm7ksfsvsee7jbozjkns3y3yijhbfgt7s34hov2kro457t.py
# Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   net => convolution_5
# Graph fragment:
#   %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024, 16],
    tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 1024
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (4096*y1)), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
    args.clear()
    assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1))
    assert_size_stride(primals_2, (256, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_3, (256, ), (1, ))
    assert_size_stride(primals_4, (256, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_5, (256, ), (1, ))
    assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_7, (256, ), (1, ))
    assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_9, (256, ), (1, ))
    assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_11, (256, ), (1, ))
    assert_size_stride(primals_12, (256, 1280, 1, 1), (1280, 1, 1, 1))
    assert_size_stride(primals_13, (256, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32)
        buf4 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024), torch.float32)
        buf5 = reinterpret_tensor(buf4, (4, 256, 1, 1), (256, 1, 256, 256), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [image_features], Original ATen: [aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_mean_0.run(buf5, primals_1, buf0, 1024, 16, grid=grid(1024), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_1.run(primals_6, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_6
        buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_1.run(primals_8, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_8
        buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_1.run(primals_10, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_10
        # Topologically Sorted Source Nodes: [image_features_1], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(buf5, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 256, 1, 1), (256, 1, 256, 256))
        buf7 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
        # Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten._to_copy]
        triton_poi_fused__to_copy_2.run(buf7, 4, grid=grid(4), stream=stream0)
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
        # Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten.add, aten.clamp]
        triton_poi_fused__to_copy_2.run(buf8, 4, grid=grid(4), stream=stream0)
        buf9 = empty_strided_cuda((4, ), (1, ), torch.int64)
        # Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp]
        triton_poi_fused__to_copy_2.run(buf9, 4, grid=grid(4), stream=stream0)
        buf10 = empty_strided_cuda((4, ), (1, ), torch.int64)
        # Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten.add, aten.clamp]
        triton_poi_fused__to_copy_2.run(buf10, 4, grid=grid(4), stream=stream0)
        buf11 = empty_strided_cuda((4, ), (1, ), torch.float32)
        # Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub]
        triton_poi_fused__to_copy_arange_clamp_mul_sub_3.run(buf11, 4, grid=grid(4), stream=stream0)
        buf12 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [image_features_1, image_features_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
        triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf7, buf9, buf6, primals_3, buf10, buf11, buf12, 16384, grid=grid(16384), stream=stream0)
        buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [image_features_2], Original ATen: [aten.sub, aten.clamp]
        triton_poi_fused__to_copy_arange_clamp_mul_sub_3.run(buf13, 4, grid=grid(4), stream=stream0)
        # Topologically Sorted Source Nodes: [atrous_block1], Original ATen: [aten.convolution]
        buf14 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 256, 4, 4), (4096, 1, 1024, 256))
        # Topologically Sorted Source Nodes: [atrous_block6], Original ATen: [aten.convolution]
        buf15 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 256, 4, 4), (4096, 1, 1024, 256))
        # Topologically Sorted Source Nodes: [atrous_block12], Original ATen: [aten.convolution]
        buf16 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 256, 4, 4), (4096, 1, 1024, 256))
        # Topologically Sorted Source Nodes: [atrous_block18], Original ATen: [aten.convolution]
        buf17 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(18, 18), dilation=(18, 18), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 256, 4, 4), (4096, 1, 1024, 256))
        buf18 = empty_strided_cuda((4, 1280, 4, 4), (20480, 1, 5120, 1280), torch.float32)
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        triton_poi_fused_cat_5.run(buf12, buf8, buf9, buf6, primals_3, buf10, buf11, buf13, buf14, primals_5, buf15, primals_7, buf16, primals_9, buf17, primals_11, buf18, 81920, grid=grid(81920), stream=stream0)
        del buf12
        del buf14
        del buf15
        del buf16
        del buf6
        del primals_11
        del primals_3
        del primals_5
        del primals_7
        del primals_9
        # Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution]
        buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf19, (4, 256, 4, 4), (4096, 1, 1024, 256))
        buf20 = reinterpret_tensor(buf17, (4, 256, 4, 4), (4096, 16, 4, 1), 0); del buf17  # reuse
        # Topologically Sorted Source Nodes: [net], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_6.run(buf19, primals_13, buf20, 1024, 16, grid=grid(1024, 16), stream=stream0)
        del buf19
        del primals_13
    return (buf20, buf0, primals_2, primals_4, buf1, buf2, buf3, primals_12, buf5, buf7, buf8, buf9, buf10, buf11, buf13, buf18, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((256, 1280, 1, 1), (1280, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class ASPP(nn.Module):

    def __init__(self, in_channel=256, depth=256):
        super(ASPP, self).__init__()
        self.mean = nn.AdaptiveAvgPool2d((1, 1))
        self.conv = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6,
            dilation=6)
        self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1,
            padding=12, dilation=12)
        self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1,
            padding=18, dilation=18)
        self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1)

    def forward(self, x):
        size = x.shape[2:]
        image_features = self.mean(x)
        image_features = self.conv(image_features)
        image_features = F.interpolate(image_features, size=size,
            mode='bilinear', align_corners=True)
        atrous_block1 = self.atrous_block1(x)
        atrous_block6 = self.atrous_block6(x)
        atrous_block12 = self.atrous_block12(x)
        atrous_block18 = self.atrous_block18(x)
        net = self.conv_1x1_output(torch.cat([image_features,
            atrous_block1, atrous_block6, atrous_block12, atrous_block18],
            dim=1))
        return net


def get_inputs():
    return [torch.rand([4, 256, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 1024 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 256 x1 = xindex // 256 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + (x0 + 256 * r2 + 4096 * x1), tmp0, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.full([1], 0, tl.int64) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 4 x0 = xindex % 4 x5 = xindex // 16 x2 = xindex // 16 % 256 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 1, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tl.where(tmp7, tmp6, tmp5) tmp11 = tmp9 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp12 < 0 tl.where(tmp14, tmp13, tmp12) tmp16 = tmp11 - tmp11 tmp18 = tmp16 * tmp17 tmp19 = tmp11 + tmp18 tl.store(out_ptr0 + x6, tmp19, None) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, 
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 1280 x3 = xindex // 20480 x4 = xindex // 1280 % 16 x2 = xindex // 5120 % 4 x1 = xindex // 1280 % 4 x5 = xindex // 1280 x6 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * x0 + 4096 * x3), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tl.full([XBLOCK], 1, tl.int32) tmp8 = tmp6 + tmp7 tmp9 = tmp6 < 0 tl.where(tmp9, tmp8, tmp6) tmp11 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0 ) tmp12 = tmp11 + tmp7 tmp13 = tmp11 < 0 tl.where(tmp13, tmp12, tmp11) tmp15 = tl.load(in_ptr3 + (256 * x3 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp16 = tl.load(in_ptr4 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp17 = tmp15 + tmp16 tmp18 = tl.load(in_ptr5 + x1, tmp4, eviction_policy='evict_last', other=0.0 ) tmp19 = tmp18 + tmp7 tmp20 = tmp18 < 0 tl.where(tmp20, tmp19, tmp18) tmp22 = tmp17 - tmp17 tmp23 = tl.load(in_ptr6 + x1, tmp4, eviction_policy='evict_last', other=0.0 ) tmp24 = tmp22 * tmp23 tmp25 = tmp17 + tmp24 tmp26 = tmp25 - tmp5 tmp27 = tl.load(in_ptr7 + x2, tmp4, eviction_policy='evict_last', other=0.0 ) tmp28 = tmp26 * tmp27 tmp29 = tmp5 + tmp28 tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp4, tmp29, tmp30) tmp32 = tmp0 >= tmp3 tmp33 = tl.full([1], 512, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tl.load(in_ptr8 + (256 * x5 + (-256 + x0)), tmp35, eviction_policy='evict_last', other=0.0) tmp37 = tl.load(in_ptr9 + (-256 + x0), tmp35, eviction_policy= 'evict_last', other=0.0) tmp38 = tmp36 + tmp37 tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp0 >= tmp33 tmp42 = tl.full([1], 768, tl.int64) tmp43 = tmp0 < tmp42 tmp44 = tmp41 & tmp43 tmp45 = tl.load(in_ptr10 + (256 * x5 + (-512 + x0)), tmp44, eviction_policy='evict_last', other=0.0) tmp46 = tl.load(in_ptr11 + (-512 + x0), tmp44, eviction_policy= 'evict_last', other=0.0) tmp47 = tmp45 + tmp46 tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp44, tmp47, tmp48) tmp50 = tmp0 >= tmp42 tmp51 = tl.full([1], 1024, tl.int64) tmp52 = tmp0 < tmp51 tmp53 = tmp50 & tmp52 tmp54 = tl.load(in_ptr12 + (256 * x5 + (-768 + x0)), tmp53, eviction_policy='evict_last', other=0.0) tmp55 = tl.load(in_ptr13 + (-768 + x0), tmp53, eviction_policy= 'evict_last', other=0.0) tmp56 = tmp54 + tmp55 tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp0 >= tmp51 tl.full([1], 1280, tl.int64) tmp62 = tl.load(in_ptr14 + (256 * x5 + (-1024 + x0)), tmp59, eviction_policy='evict_last', other=0.0) tmp63 = tl.load(in_ptr15 + (-1024 + x0), tmp59, eviction_policy= 'evict_last', other=0.0) tmp64 = tmp62 + tmp63 tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype) tmp66 = tl.where(tmp59, tmp64, tmp65) tmp67 = tl.where(tmp53, tmp58, tmp66) tmp68 = tl.where(tmp44, tmp49, tmp67) tmp69 = tl.where(tmp35, tmp40, tmp68) tmp70 = tl.where(tmp4, tmp31, tmp69) tl.store(out_ptr0 + x6, tmp70, None) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = 
yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 4096 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_2, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 1280, 1, 1), (1280, 1, 1, 1)) assert_size_stride(primals_13, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf4 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024), torch.float32) buf5 = reinterpret_tensor(buf4, (4, 256, 1, 1), (256, 1, 256, 256), 0) del buf4 get_raw_stream(0) triton_per_fused_mean_0[grid(1024)](buf5, primals_1, buf0, 1024, 16, XBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_6, buf1, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_8, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_10, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = extern_kernels.convolution(buf5, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 1, 1), (256, 1, 256, 256)) buf7 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_2[grid(4)](buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_2[grid(4)](buf8, 4, XBLOCK=4, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_2[grid(4)](buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_2[grid(4)](buf10, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf11 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_3[grid(4)](buf11, 4, XBLOCK=4, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. 
float32) triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(16384)]( buf7, buf9, buf6, primals_3, buf10, buf11, buf12, 16384, XBLOCK =256, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_3[grid(4)](buf13, 4, XBLOCK=4, num_warps=1, num_stages=1) buf14 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf15 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf16 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf17 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(18, 18), dilation=(18, 18), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf18 = empty_strided_cuda((4, 1280, 4, 4), (20480, 1, 5120, 1280), torch.float32) triton_poi_fused_cat_5[grid(81920)](buf12, buf8, buf9, buf6, primals_3, buf10, buf11, buf13, buf14, primals_5, buf15, primals_7, buf16, primals_9, buf17, primals_11, buf18, 81920, XBLOCK=512, num_warps=8, num_stages=1) del buf12 del buf14 del buf15 del buf16 del buf6 del primals_11 del primals_3 del primals_5 del primals_7 del primals_9 buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf20 = reinterpret_tensor(buf17, (4, 256, 4, 4), (4096, 16, 4, 1), 0) del buf17 triton_poi_fused_convolution_6[grid(1024, 16)](buf19, primals_13, buf20, 1024, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf19 del primals_13 return (buf20, buf0, primals_2, primals_4, buf1, buf2, buf3, primals_12, buf5, buf7, buf8, buf9, buf10, buf11, buf13, buf18) class ASPPNew(nn.Module): def __init__(self, in_channel=256, depth=256): super(ASPPNew, self).__init__() self.mean = nn.AdaptiveAvgPool2d((1, 1)) self.conv = nn.Conv2d(in_channel, depth, 1, 1) self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1) self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6) self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1, padding=12, dilation=12) self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1, padding=18, dilation=18) self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_4 = self.atrous_block1.weight primals_5 = self.atrous_block1.bias primals_6 = self.atrous_block6.weight primals_7 = self.atrous_block6.bias primals_8 = self.atrous_block12.weight primals_9 = self.atrous_block12.bias primals_10 = self.atrous_block18.weight primals_11 = self.atrous_block18.bias primals_12 = self.conv_1x1_output.weight primals_13 = self.conv_1x1_output.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
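A note on the weight-copy kernel above: for a (256, 256, 3, 3) weight, the output strides (2304, 1, 768, 256) that triton_poi_fused_1 writes are exactly PyTorch's channels-last layout for a 4D tensor, so the kernel is a layout permutation that lets the dilated convolutions consume channels-last operands (buf0's strides (4096, 1, 1024, 256) are the channels-last layout of the (4, 256, 4, 4) input for the same reason). A minimal sketch of the same reordering in plain PyTorch; this is illustrative only, not part of the dataset entry:

import torch

# Default contiguous strides for a (O=256, I=256, kH=3, kW=3) weight:
# (I*kH*kW, kH*kW, kW, 1) == (2304, 9, 3, 1).
w = torch.randn(256, 256, 3, 3)
assert w.stride() == (2304, 9, 3, 1)

# Channels-last keeps the shape but reorders storage so the channel
# dimension is innermost: strides (2304, 1, 768, 256), matching the
# buffers that triton_poi_fused_1 fills.
w_cl = w.to(memory_format=torch.channels_last)
assert w_cl.stride() == (2304, 1, 768, 256)
assert torch.equal(w, w_cl)  # same logical values, different physical order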
DoggyLiu0116/MamboNet
ASPP
false
5113
[ "MIT" ]
1
3b708091422491f660c4bd5eb12b06ce3b8a5f79
https://github.com/DoggyLiu0116/MamboNet/tree/3b708091422491f660c4bd5eb12b06ce3b8a5f79
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, in_channel=256, depth=256):
        super().__init__()
        self.mean = nn.AdaptiveAvgPool2d((1, 1))
        self.conv = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6,
            dilation=6)
        self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1,
            padding=12, dilation=12)
        self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1,
            padding=18, dilation=18)
        self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1)

    def forward(self, x):
        size = x.shape[2:]
        image_features = self.mean(x)
        image_features = self.conv(image_features)
        image_features = F.interpolate(image_features, size=size,
            mode='bilinear', align_corners=True)
        atrous_block1 = self.atrous_block1(x)
        atrous_block6 = self.atrous_block6(x)
        atrous_block12 = self.atrous_block12(x)
        atrous_block18 = self.atrous_block18(x)
        net = self.conv_1x1_output(torch.cat([image_features,
            atrous_block1, atrous_block6, atrous_block12, atrous_block18],
            dim=1))
        return net


def get_inputs():
    return [torch.rand([4, 256, 4, 4])]


def get_init_inputs():
    return []
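One way to sanity-check an entry like this is to copy the eager module's weights into the compiled wrapper and compare outputs; the two classes use the same submodule names, so load_state_dict lines up. The following is an illustrative sketch, assuming the ASPP and ASPPNew definitions above are in scope and a CUDA device is available (the compiled call allocates CUDA buffers):

import torch

torch.manual_seed(0)
ref = ASPP().cuda().eval()
fast = ASPPNew().cuda().eval()
fast.load_state_dict(ref.state_dict())  # identical weights in both paths

x = torch.rand(4, 256, 4, 4, device='cuda')
with torch.no_grad():
    y_ref = ref(x)
    y_fast = fast(x)

assert y_ref.shape == y_fast.shape == (4, 256, 4, 4)
assert torch.allclose(y_ref, y_fast, atol=1e-5)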
TAGConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7v/c7vxymjug6cmd6ortn7oen7sxreksunepw2tsdycutut57s4epmp.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mm, %mm_1, %mm_2], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (16*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/i3/ci3ntqdazryccf7cw2urpyjx3oine2oobozzcepsiti6ioemdsor.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mm, %mm_1, %mm_2], -1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (16*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm] extern_kernels.mm(primals_2, buf0, out=buf1) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf2 = reinterpret_tensor(buf6, (4, 4), (16, 1), 12) # alias # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(primals_2, buf1, out=buf2) del primals_2 buf3 = reinterpret_tensor(buf6, (4, 4), (16, 1), 0) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf3, 16, grid=grid(16), stream=stream0) del primals_1 buf4 = reinterpret_tensor(buf6, (4, 4), (16, 1), 4) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf0, 
buf4, 16, grid=grid(16), stream=stream0) del buf0 buf5 = reinterpret_tensor(buf6, (4, 4), (16, 1), 8) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf5, 16, grid=grid(16), stream=stream0) buf7 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf6, reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7) del primals_3 del primals_4 return (buf7, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class TAGConv(nn.Module):

    def __init__(self, in_features, out_features, K=3, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.K = K
        self.w = nn.Linear(in_features * (self.K + 1), out_features,
            bias=bias)

    def reset_parameters(self):
        self.w.reset_parameters()

    def forward(self, x, adj):
        out = x
        xs = [x]
        for _ in range(self.K):
            out = adj.mm(out)
            xs.append(out)
        out = self.w(torch.cat(xs, dim=-1))
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
            )


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 16 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 16 * x1), tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, buf0, out=buf1) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf2 = reinterpret_tensor(buf6, (4, 4), (16, 1), 12) extern_kernels.mm(primals_2, buf1, out=buf2) del primals_2 buf3 = reinterpret_tensor(buf6, (4, 4), (16, 1), 0) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](primals_1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf4 = reinterpret_tensor(buf6, (4, 4), (16, 1), 4) triton_poi_fused_cat_1[grid(16)](buf0, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf5 = reinterpret_tensor(buf6, (4, 4), (16, 1), 8) triton_poi_fused_cat_1[grid(16)](buf1, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf1 del buf1 extern_kernels.addmm(primals_4, buf6, reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7) del primals_3 del primals_4 return buf7, buf6 class TAGConvNew(nn.Module): def __init__(self, in_features, out_features, K=3, bias=True): super().__init__() self.in_features = in_features self.out_features = out_features self.K = K self.w = nn.Linear(in_features * (self.K + 1), out_features, bias=bias) def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})' ) def forward(self, input_0, input_1): primals_3 = self.w.weight primals_4 = self.w.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
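Note how the compiled call avoids a separate torch.cat: buf6 is allocated once as the full (4, 16) concatenation buffer, the small copy kernels fill three of its 4-column blocks, and the last hop's mm writes straight into a strided view (reinterpret_tensor(buf6, (4, 4), (16, 1), 12)), so no extra copy of that block is needed. Roughly the same trick by hand, as a sketch with illustrative names:

import torch

a, b = torch.rand(4, 4), torch.rand(4, 4)
buf = torch.empty(4, 16)            # preallocated concat buffer

torch.mm(a, b, out=buf[:, 12:16])   # mm writes straight into the last block
buf[:, 0:4] = b                     # copy kernels fill the other blocks

assert torch.allclose(buf[:, 12:16], a @ b)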
EdisonLeeeee/Graphgallery
TAGConv
false
5114
[ "MIT" ]
1
8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, in_features, out_features, K=3, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.K = K
        self.w = nn.Linear(in_features * (self.K + 1), out_features,
            bias=bias)

    def reset_parameters(self):
        self.w.reset_parameters()

    def forward(self, x, adj):
        out = x
        xs = [x]
        for _ in range(self.K):
            out = adj.mm(out)
            xs.append(out)
        out = self.w(torch.cat(xs, dim=-1))
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
            )


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4]
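The buf0/buf1/buf2 chain in the compiled graph corresponds to adj @ x, adj^2 @ x, and adj^3 @ x; concatenated with x they form the (4, 16) input to the single addmm. The same computation unrolled by hand, as a sketch against the TAGConv definition above:

import torch

torch.manual_seed(0)
conv = TAGConv(in_features=4, out_features=4, K=3)
x, adj = torch.rand(4, 4), torch.rand(4, 4)

# Unrolled K-hop propagation: [x, A x, A^2 x, A^3 x] -> one linear layer.
h1 = adj @ x
h2 = adj @ h1
h3 = adj @ h2
manual = conv.w(torch.cat([x, h1, h2, h3], dim=-1))

assert torch.allclose(conv(x, adj), manual, atol=1e-6)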
DAGNNConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jj/cjjiphzf24trw5axa2pmjeyaxi43ixlzlidljcludzymowb4xjf4.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] # Source node to ATen node mapping: # h => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mm, %mm_1, %mm_2, %mm_3, %mm_4, %mm_5, %mm_6, %mm_7, %mm_8, %mm_9], 1), kwargs = {}) triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (44*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/vz/cvz2b43gw7evjx7y4go5psaewdyw7vxpnuzbnsgioqvsaqvketru.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] # Source node to ATen node mapping: # h => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mm, %mm_1, %mm_2, %mm_3, %mm_4, %mm_5, %mm_6, %mm_7, %mm_8, %mm_9], 1), kwargs = {}) triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (44*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2v/c2v75im7azf6ydjugomrvbgk2qfdv2227zl2y3hb44a525i6c3ql.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_2,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 
'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 44 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_2, out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf0, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf1, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf2, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf3, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf4, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf5, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf6, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf7, out=buf8) buf20 = empty_strided_cuda((4, 44), (44, 1), torch.float32) buf9 = reinterpret_tensor(buf20, (4, 4), (44, 1), 40) # alias # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.mm] extern_kernels.mm(primals_1, buf8, out=buf9) del primals_1 buf10 = reinterpret_tensor(buf20, (4, 4), (44, 1), 0) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] stream0 = get_raw_stream(0) triton_poi_fused_stack_0.run(primals_2, buf10, 16, grid=grid(16), stream=stream0) del primals_2 buf11 = reinterpret_tensor(buf20, (4, 4), (44, 1), 4) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(buf0, buf11, 16, grid=grid(16), stream=stream0) del buf0 buf12 = reinterpret_tensor(buf20, (4, 4), (44, 1), 8) # alias # Topologically Sorted Source Nodes: [h], Original ATen: 
[aten.stack] triton_poi_fused_stack_1.run(buf1, buf12, 16, grid=grid(16), stream=stream0) del buf1 buf13 = reinterpret_tensor(buf20, (4, 4), (44, 1), 12) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(buf2, buf13, 16, grid=grid(16), stream=stream0) del buf2 buf14 = reinterpret_tensor(buf20, (4, 4), (44, 1), 16) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_0.run(buf3, buf14, 16, grid=grid(16), stream=stream0) del buf3 buf15 = reinterpret_tensor(buf20, (4, 4), (44, 1), 20) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(buf4, buf15, 16, grid=grid(16), stream=stream0) del buf4 buf16 = reinterpret_tensor(buf20, (4, 4), (44, 1), 24) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(buf5, buf16, 16, grid=grid(16), stream=stream0) del buf5 buf17 = reinterpret_tensor(buf20, (4, 4), (44, 1), 28) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(buf6, buf17, 16, grid=grid(16), stream=stream0) del buf6 buf18 = reinterpret_tensor(buf20, (4, 4), (44, 1), 32) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_0.run(buf7, buf18, 16, grid=grid(16), stream=stream0) del buf7 buf19 = reinterpret_tensor(buf20, (4, 4), (44, 1), 36) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(buf8, buf19, 16, grid=grid(16), stream=stream0) buf21 = empty_strided_cuda((44, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf20, (44, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf21) del primals_3 buf22 = empty_strided_cuda((4, 11, 1), (11, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf21, buf22, 44, grid=grid(44), stream=stream0) buf23 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf22, (4, 1, 11), (11, 0, 1), 0), reinterpret_tensor(buf20, (4, 11, 4), (44, 4, 1), 0), out=buf23) del buf22 return (reinterpret_tensor(buf23, (4, 4), (4, 1), 0), buf20, buf21, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class DAGNNConv(nn.Module):

    def __init__(self, in_features, out_features=1, K=10, bias=False):
        super().__init__()
        assert out_features == 1, "'out_features' must be 1"
        self.in_features = in_features
        self.out_features = out_features
        self.lin = nn.Linear(in_features, out_features, bias=bias)
        self.K = K
        self.act = nn.Sigmoid()

    def reset_parameters(self):
        self.lin.reset_parameters()

    def forward(self, x, adj):
        propagations = [x]
        for _ in range(self.K):
            x = adj.mm(x)
            propagations.append(x)
        h = torch.stack(propagations, dim=1)
        retain_score = self.act(self.lin(h)).permute(0, 2, 1).contiguous()
        out = (retain_score @ h).squeeze(1)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
            )


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 44 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 44 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 44 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf0, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf1, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf2, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf3, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf4, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf5, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf6, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, buf7, out=buf8) buf20 = empty_strided_cuda((4, 44), (44, 1), torch.float32) buf9 = reinterpret_tensor(buf20, (4, 4), (44, 1), 40) extern_kernels.mm(primals_1, buf8, out=buf9) del primals_1 buf10 = reinterpret_tensor(buf20, (4, 4), (44, 1), 0) get_raw_stream(0) triton_poi_fused_stack_0[grid(16)](primals_2, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf11 = reinterpret_tensor(buf20, (4, 4), (44, 1), 4) triton_poi_fused_stack_1[grid(16)](buf0, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf12 = reinterpret_tensor(buf20, (4, 4), (44, 1), 8) triton_poi_fused_stack_1[grid(16)](buf1, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 buf13 = reinterpret_tensor(buf20, (4, 4), (44, 1), 12) triton_poi_fused_stack_1[grid(16)](buf2, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 buf14 = reinterpret_tensor(buf20, (4, 4), (44, 1), 16) triton_poi_fused_stack_0[grid(16)](buf3, buf14, 16, 
XBLOCK=16, num_warps=1, num_stages=1) del buf3 buf15 = reinterpret_tensor(buf20, (4, 4), (44, 1), 20) triton_poi_fused_stack_1[grid(16)](buf4, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 buf16 = reinterpret_tensor(buf20, (4, 4), (44, 1), 24) triton_poi_fused_stack_1[grid(16)](buf5, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 buf17 = reinterpret_tensor(buf20, (4, 4), (44, 1), 28) triton_poi_fused_stack_1[grid(16)](buf6, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf6 buf18 = reinterpret_tensor(buf20, (4, 4), (44, 1), 32) triton_poi_fused_stack_0[grid(16)](buf7, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf7 buf19 = reinterpret_tensor(buf20, (4, 4), (44, 1), 36) triton_poi_fused_stack_1[grid(16)](buf8, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((44, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf20, (44, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf21) del primals_3 buf22 = empty_strided_cuda((4, 11, 1), (11, 1, 1), torch.float32) triton_poi_fused_sigmoid_2[grid(44)](buf21, buf22, 44, XBLOCK=64, num_warps=1, num_stages=1) buf23 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0) del buf8 extern_kernels.bmm(reinterpret_tensor(buf22, (4, 1, 11), (11, 0, 1), 0), reinterpret_tensor(buf20, (4, 11, 4), (44, 4, 1), 0), out=buf23 ) del buf22 return reinterpret_tensor(buf23, (4, 4), (4, 1), 0), buf20, buf21 class DAGNNConvNew(nn.Module): def __init__(self, in_features, out_features=1, K=10, bias=False): super().__init__() assert out_features == 1, "'out_features' must be 1" self.in_features = in_features self.out_features = out_features self.lin = nn.Linear(in_features, out_features, bias=bias) self.K = K self.act = nn.Sigmoid() def reset_parameters(self): self.lin.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})' ) def forward(self, input_0, input_1): primals_3 = self.lin.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
EdisonLeeeee/Graphgallery
DAGNNConv
false
5115
[ "MIT" ]
1
8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, in_features, out_features=1, K=10, bias=False):
        super().__init__()
        assert out_features == 1, "'out_features' must be 1"
        self.in_features = in_features
        self.out_features = out_features
        self.lin = nn.Linear(in_features, out_features, bias=bias)
        self.K = K
        self.act = nn.Sigmoid()

    def reset_parameters(self):
        self.lin.reset_parameters()

    def forward(self, x, adj):
        propagations = [x]
        for _ in range(self.K):
            x = adj.mm(x)
            propagations.append(x)
        h = torch.stack(propagations, dim=1)
        retain_score = self.act(self.lin(h)).permute(0, 2, 1).contiguous()
        out = (retain_score @ h).squeeze(1)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
            )


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4]
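Here the compiled graph chains ten mm calls, stacks the eleven propagation steps into buf20 (eleven 4-column blocks at offsets 0, 4, ..., 40), gates them with a sigmoid over the lin scores, and combines them with one bmm. The equivalent computation spelled out, as a sketch against the DAGNNConv definition above:

import torch

torch.manual_seed(0)
conv = DAGNNConv(in_features=4, K=10)
x, adj = torch.rand(4, 4), torch.rand(4, 4)

# K + 1 propagation steps stacked along a new hop dimension: (N, K+1, F).
hops = [x]
for _ in range(conv.K):
    hops.append(adj @ hops[-1])
h = torch.stack(hops, dim=1)                         # (4, 11, 4)

# Per-hop retain scores gate the weighted combination of the stack.
score = torch.sigmoid(conv.lin(h)).permute(0, 2, 1)  # (4, 1, 11)
manual = (score @ h).squeeze(1)                      # (4, 4)

assert torch.allclose(conv(x, adj), manual, atol=1e-6)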
Attn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nj/cnjqzm7hm3u6ggjfvpspnl6pii56jckgwol3ydau4qesjyoteutl.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %permute_1], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = (xindex // 32) x1 = (xindex // 8) % 4 x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x2) + (16*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh] # Source node to ATen node mapping: # energy => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/i5/ci57psuuueutwfqpm57dmpddhnflxjjxpqzf6cwcsnd2zbemfstl.py # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat_1 => repeat_1 # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_5, [4, 1]), kwargs = {}) triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 
1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lt/cltwbpokq7b7gvah2tjf27qlzw6vpmwfuzs3xfk7mhbxym753kvi.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 
= xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rr/crrmj7r54x5uk325xkhuskxp4m5prz3fpx53yc2st4o5pwbhq32p.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) 
assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_5, buf3, 16, grid=grid(16), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf5, buf6, 16, grid=grid(16), stream=stream0) del buf5 return (reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
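# A hedged reading of the wrapper above (a sketch; buffer roles are inferred from the
# "Topologically Sorted Source Nodes" comments in the generated code):
#   buf0      = cat([hidden broadcast over time, enc_out], dim=2)  -> triton_poi_fused_cat_0
#   buf1/buf2 = tanh(buf0 @ attn.weight.T + attn.bias)             -> extern mm + triton_poi_fused_tanh_1
#   buf3      = self.v repeated across the batch                   -> triton_poi_fused_repeat_2
#   buf4      = bmm(v, energy.transpose(2, 1))                     -> extern_kernels.bmm
#   buf5/buf6 = softmax(buf4.squeeze(1), dim=1), split into a
#               rowmax-subtracted exp pass and a row-sum division  -> triton_poi_fused__softmax_3/_4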
import math import torch import torch.nn as nn import torch.nn.functional as F class Attn(nn.Module): def __init__(self, hidden): super().__init__() self.hidden = hidden self.attn = nn.Linear(self.hidden * 2, hidden) self.v = nn.Parameter(torch.rand(hidden)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, enc_out): l = enc_out.size(0) enc_out.size(1) H = hidden.repeat(l, 1, 1).transpose(0, 1) enc_out = enc_out.transpose(0, 1) attn_score = self.score(H, enc_out) return F.softmax(attn_score, dim=1).unsqueeze(1) def score(self, hidden, enc_out): """ concat score function score(s_t, h_i) = vT_a tanh(Wa[s_t; h_i]) """ energy = torch.tanh(self.attn(torch.cat([hidden, enc_out], 2))) energy = energy.transpose(2, 1) v = self.v.repeat(enc_out.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden': 4}]
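# Shape trace for the sample inputs above (hidden: (4, 4), enc_out: (4, 4, 4)),
# a sketch to make the score() algebra concrete:
#   H = hidden.repeat(4, 1, 1).transpose(0, 1)        # (4, 4, 4)
#   torch.cat([H, enc_out.transpose(0, 1)], 2)        # (4, 4, 8)
#   energy = tanh(self.attn(...))                     # (4, 4, 4)
#   torch.bmm(v, energy.transpose(2, 1))              # (4, 1, 4) -> squeeze(1) -> (4, 4)
#   F.softmax(..., dim=1).unsqueeze(1)                # (4, 1, 4), the module output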
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, 
primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0) class AttnNew(nn.Module): def __init__(self, hidden): super().__init__() self.hidden = hidden self.attn = nn.Linear(self.hidden * 2, hidden) self.v = nn.Parameter(torch.rand(hidden)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def score(self, hidden, enc_out): """ concat score function score(s_t, h_i) = vT_a tanh(Wa[s_t; h_i]) """ energy = torch.tanh(self.attn(torch.cat([hidden, enc_out], 2))) energy = energy.transpose(2, 1) v = self.v.repeat(enc_out.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def forward(self, input_0, input_1): primals_4 = self.v primals_3 = self.attn.weight primals_5 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
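# A minimal parity check (a sketch: it assumes a CUDA device and that `Attn` and
# `AttnNew` from the two listings above live in one importable module; the helper
# name below is illustrative, not part of this entry):
import torch

def _check_attn_parity():
    torch.manual_seed(0)
    eager = Attn(4).cuda()
    compiled = AttnNew(4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share attn.weight, attn.bias and v
    hidden = torch.rand(4, 4, device='cuda')
    enc_out = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(hidden, enc_out), compiled(hidden, enc_out))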
Eddie-Hwang/Co-Eye_Motion_Generation
Attn
false
5,116
[ "MIT" ]
1
8e244680115fb63bc26018cb6b53bcfbd04e9683
https://github.com/Eddie-Hwang/Co-Eye_Motion_Generation/tree/8e244680115fb63bc26018cb6b53bcfbd04e9683
import math import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hidden): super().__init__() self.hidden = hidden self.attn = nn.Linear(self.hidden * 2, hidden) self.v = nn.Parameter(torch.rand(hidden)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, enc_out): l = enc_out.size(0) enc_out.size(1) H = hidden.repeat(l, 1, 1).transpose(0, 1) enc_out = enc_out.transpose(0, 1) attn_score = self.score(H, enc_out) return F.softmax(attn_score, dim=1).unsqueeze(1) def score(self, hidden, enc_out): """ concat score function score(s_t, h_i) = vT_a tanh(Wa[s_t; h_i]) """ energy = torch.tanh(self.attn(torch.cat([hidden, enc_out], 2))) energy = energy.transpose(2, 1) v = self.v.repeat(enc_out.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
SpectralEigenConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/37/c37syvo63vdmotuxpfmllazayh63ckflzom725nffg466fmtlhph.py # Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, x_out_11, mul_10, x_out_12], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # mul_1 => mul_1 # mul_10 => mul_10 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_8 => mul_8 # mul_9 => mul_9 # x_out_1 => mul # x_out_10 => add_9 # x_out_11 => div # x_out_12 => add_10 # x_out_2 => add_1 # x_out_3 => add_2 # x_out_4 => add_3 # x_out_5 => add_4 # x_out_6 => add_5 # x_out_7 => add_6 # x_out_8 => add_7 # x_out_9 => add_8 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_1, 0.9), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 0.9), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_3, 0.9), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 0.9), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_5, 0.9), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_6, 0.9), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_7, 0.9), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_6), kwargs = {}) # %mul_7 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_8, 0.9), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_9, 0.9), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_8), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_10, 0.9), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_9), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_9, 10), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 0.1), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_10), kwargs = {}) triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr0 + (x0), xmask) tmp6 = tl.load(in_ptr1 + (x0), xmask) tmp9 = tl.load(in_ptr2 + (x0), xmask) tmp12 = tl.load(in_ptr3 + (x0), xmask) tmp15 = tl.load(in_ptr4 + (x0), xmask) tmp18 = tl.load(in_ptr5 + (x0), xmask) tmp21 = tl.load(in_ptr6 + (x0), xmask) tmp24 = tl.load(in_ptr7 + (x0), xmask) tmp27 = tl.load(in_ptr8 + (x0), xmask) tmp32 = tl.load(in_out_ptr1 + (x0), xmask) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp1 tmp14 = tmp11 
+ tmp13 tmp16 = tmp15 * tmp1 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp1 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp1 tmp23 = tmp20 + tmp22 tmp25 = tmp24 * tmp1 tmp26 = tmp23 + tmp25 tmp28 = tmp27 * tmp1 tmp29 = tmp26 + tmp28 tmp30 = 0.1 tmp31 = tmp29 * tmp30 tmp33 = tmp32 * tmp30 tmp34 = tmp31 + tmp33 tl.store(in_out_ptr1 + (x0), tmp34, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf1, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf2, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf4, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf5, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf6, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf7, out=buf8) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf8, out=buf9) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf9, out=buf11) buf10 = buf1; del buf1 # reuse buf12 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, x_out_11, mul_10, x_out_12], Original ATen: [aten.add, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_0.run(buf10, buf12, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf11, 16, grid=grid(16), stream=stream0) del buf10 del buf11 del buf2 del buf3 del buf4 del buf5 del buf6 del buf7 del buf8 del buf9 return (buf12, primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 
1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
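# A hedged reading of the wrapper above: buf0 = x @ w.weight.T, and buf1..buf11 are the
# successive propagations adj @ (previous buffer), i.e. A^k (x W^T) for k = 1..10;
# triton_poi_fused_add_div_mul_0 then folds them into
#   buf12 = 0.1 * sum_k 0.9 * A^k (x W^T) + 0.1 * (x W^T)
# where 0.9 = 1 - alpha and 0.1 doubles as both alpha and 1/K for the traced
# defaults K=10, alpha=0.1.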
import torch import torch.nn as nn class SpectralEigenConv(nn.Module): def __init__(self, in_features, out_features, bias=False, K=10, alpha= 0.1, **kwargs): super().__init__() assert K > 0 self.K = K self.alpha = alpha self.in_features = in_features self.out_features = out_features self.w = nn.Linear(in_features, out_features, bias=bias) def forward(self, x, U, V=None): """ x: node attribute matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.w(x) if V is not None: V_pow = torch.ones_like(V) V_out = torch.zeros_like(V) for _ in range(self.K): V_pow *= V V_out += (1 - self.alpha) * V_pow V_out = V_out / self.K x_out = U * V_out @ (U.t() @ x) + self.alpha * x else: adj = U x_in = x x_out = torch.zeros_like(x) for _ in range(self.K): x = torch.spmm(adj, x) x_out += (1 - self.alpha) * x x_out /= self.K x_out += self.alpha * x_in return x_out def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})' ) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
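# For the traced defaults the V=None branch above reduces to the closed form
# (a sketch; h = self.w(x), alpha = 0.1, K = 10):
#   x_out = (1/K) * sum_{k=1}^{K} (1 - alpha) * A^k h + alpha * h
# a truncated, personalized-PageRank-style propagation of the linearly
# transformed features h over the adjacency matrix A.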
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr1 + x0, xmask) tmp9 = tl.load(in_ptr2 + x0, xmask) tmp12 = tl.load(in_ptr3 + x0, xmask) tmp15 = tl.load(in_ptr4 + x0, xmask) tmp18 = tl.load(in_ptr5 + x0, xmask) tmp21 = tl.load(in_ptr6 + x0, xmask) tmp24 = tl.load(in_ptr7 + x0, xmask) tmp27 = tl.load(in_ptr8 + x0, xmask) tmp32 = tl.load(in_out_ptr1 + x0, xmask) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp1 tmp14 = tmp11 + tmp13 tmp16 = tmp15 * tmp1 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp1 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp1 tmp23 = tmp20 + tmp22 tmp25 = tmp24 * tmp1 tmp26 = tmp23 + tmp25 tmp28 = tmp27 * tmp1 tmp29 = tmp26 + tmp28 tmp30 = 0.1 tmp31 = tmp29 * tmp30 tmp33 = tmp32 * tmp30 tmp34 = tmp31 + tmp33 tl.store(in_out_ptr1 + x0, tmp34, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf1, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf2, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf4, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf5, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf6, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf7, out=buf8) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf8, out=buf9) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf9, out=buf11) buf10 = buf1 del buf1 buf12 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mul_0[grid(16)](buf10, buf12, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf10 del buf11 del buf2 del buf3 del buf4 del buf5 del buf6 del buf7 del buf8 del buf9 return buf12, primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0) class 
SpectralEigenConvNew(nn.Module): def __init__(self, in_features, out_features, bias=False, K=10, alpha= 0.1, **kwargs): super().__init__() assert K > 0 self.K = K self.alpha = alpha self.in_features = in_features self.out_features = out_features self.w = nn.Linear(in_features, out_features, bias=bias) def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})' ) def forward(self, input_0, input_1): primals_1 = self.w.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
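# Parity sketch (assumes a CUDA device; `SpectralEigenConv` and `SpectralEigenConvNew`
# are the classes from this entry, the helper name is illustrative):
import torch

def _check_spectral_parity():
    torch.manual_seed(0)
    eager = SpectralEigenConv(4, 4).cuda()
    compiled = SpectralEigenConvNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # a single Linear weight, no bias
    x = torch.rand(4, 4, device='cuda')
    adj = torch.rand(4, 4, device='cuda')
    torch.testing.assert_close(eager(x, adj), compiled(x, adj))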
EdisonLeeeee/Graphgallery
SpectralEigenConv
false
5,117
[ "MIT" ]
1
8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_features, out_features, bias=False, K=10, alpha= 0.1, **kwargs): super().__init__() assert K > 0 self.K = K self.alpha = alpha self.in_features = in_features self.out_features = out_features self.w = nn.Linear(in_features, out_features, bias=bias) def forward(self, x, U, V=None): """ x: node attribute matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.w(x) if V is not None: V_pow = torch.ones_like(V) V_out = torch.zeros_like(V) for _ in range(self.K): V_pow *= V V_out += (1 - self.alpha) * V_pow V_out = V_out / self.K x_out = U * V_out @ (U.t() @ x) + self.alpha * x else: adj = U x_in = x x_out = torch.zeros_like(x) for _ in range(self.K): x = torch.spmm(adj, x) x_out += (1 - self.alpha) * x x_out /= self.K x_out += self.alpha * x_in return x_out def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})' ) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4]
CO2Regularizer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [out0], Original ATen: [aten.div] # Source node to ATen node mapping: # out0 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out0], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out1], Original ATen: [aten.div] triton_poi_fused_div_0.run(arg1_1, buf1, 16, grid=grid(16), stream=stream0) del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
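# A hedged reading of the kernel above: triton_poi_fused_div_0 is a fused row-wise
# L2 normalization of a (4, 4) tensor, out = x / max(||x_row||_2, 1e-12), which
# matches torch.nn.functional.normalize(x, p=2, dim=1) at its default eps.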
import torch class MemoryBankModule(torch.nn.Module): """Memory bank implementation This is a parent class to all loss functions implemented by the lightly Python package. This way, any loss can be used with a memory bank if desired. Attributes: size: Number of keys the memory bank can store. If set to 0, memory bank is not used. Examples: >>> class MyLossFunction(MemoryBankModule): >>> >>> def __init__(self, memory_bank_size: int = 2 ** 16): >>> super(MyLossFunction, self).__init__(memory_bank_size) >>> >>> def forward(self, output: torch.Tensor, >>> labels: torch.Tensor = None): >>> >>> output, negatives = super( >>> MyLossFunction, self).forward(output) >>> >>> if negatives is not None: >>> # evaluate loss with negative samples >>> else: >>> # evaluate loss without negative samples """ def __init__(self, size: 'int'=2 ** 16): super(MemoryBankModule, self).__init__() if size < 0: msg = f'Illegal memory bank size {size}, must be non-negative.' raise ValueError(msg) self.size = size self.bank = None self.bank_ptr = None @torch.no_grad() def _init_memory_bank(self, dim: 'int'): """Initialize the memory bank if it's empty Args: dim: The dimension of the keys which are stored in the bank. """ self.bank = torch.randn(dim, self.size) self.bank = torch.nn.functional.normalize(self.bank, dim=0) self.bank_ptr = torch.LongTensor([0]) @torch.no_grad() def _dequeue_and_enqueue(self, batch: 'torch.Tensor'): """Dequeue the oldest batch and add the latest one Args: batch: The latest batch of keys to add to the memory bank. """ batch_size = batch.shape[0] ptr = int(self.bank_ptr) if ptr + batch_size >= self.size: self.bank[:, ptr:] = batch[:self.size - ptr].T.detach() self.bank_ptr[0] = 0 else: self.bank[:, ptr:ptr + batch_size] = batch.T.detach() self.bank_ptr[0] = ptr + batch_size def forward(self, output: 'torch.Tensor', labels: 'torch.Tensor'=None, update: 'bool'=False): """Query memory bank for additional negative samples Args: output: The output of the model. labels: Should always be None, will be ignored. Returns: The output if the memory bank is of size 0, otherwise the output and the entries from the memory bank. """ if self.size == 0: return output, None _, dim = output.shape if self.bank is None: self._init_memory_bank(dim) bank = self.bank.clone().detach() if update: self._dequeue_and_enqueue(output) return output, bank class CO2Regularizer(MemoryBankModule): """Implementation of the CO2 regularizer [0] for self-supervised learning. [0] CO2, 2021, https://arxiv.org/abs/2010.02217 Attributes: alpha: Weight of the regularization term. t_consistency: Temperature used during softmax calculations. memory_bank_size: Number of negative samples to store in the memory bank. Use 0 to use the second batch for negative samples. Examples: >>> # initialize loss function for MoCo >>> loss_fn = NTXentLoss(memory_bank_size=4096) >>> >>> # initialize CO2 regularizer >>> co2 = CO2Regularizer(alpha=1.0, memory_bank_size=4096) >>> >>> # generate two random transforms of images >>> t0 = transforms(images) >>> t1 = transforms(images) >>> >>> # feed through the MoCo model >>> out0, out1 = model(t0, t1) >>> >>> # calculate loss and apply regularizer >>> loss = loss_fn(out0, out1) + co2(out0, out1) """ def __init__(self, alpha: 'float'=1, t_consistency: 'float'=0.05, memory_bank_size: 'int'=0): super(CO2Regularizer, self).__init__(size=memory_bank_size) self.log_target = True try: self.kl_div = torch.nn.KLDivLoss(reduction='batchmean', log_target=True) except TypeError: self.log_target = False self.kl_div = torch.nn.KLDivLoss(reduction='batchmean') self.t_consistency = t_consistency self.alpha = alpha def _get_pseudo_labels(self, out0: 'torch.Tensor', out1: 'torch.Tensor', negatives: 'torch.Tensor'=None): """Computes the soft pseudo labels across negative samples. Args: out0: Output projections of the first set of transformed images (query). Shape: bsz x n_ftrs out1: Output projections of the second set of transformed images (positive sample). Shape: bsz x n_ftrs negatives: Negative samples to compare against. If this is None, the second batch of images will be used as negative samples. Shape: memory_bank_size x n_ftrs Returns: Log probability that a positive sample will classify each negative sample as the positive sample. Shape: bsz x (bsz - 1) or bsz x memory_bank_size """ batch_size, _ = out0.shape if negatives is None: l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1) l_neg = torch.einsum('nc,ck->nk', [out0, out1.t()]) l_neg = l_neg.masked_select(~torch.eye(batch_size, dtype=bool, device=l_neg.device)).view(batch_size, batch_size - 1) else: l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1) l_neg = torch.einsum('nc,ck->nk', [out0, negatives.clone().detach()]) logits = torch.cat([l_pos, l_neg], dim=1) logits = logits / self.t_consistency return torch.nn.functional.log_softmax(logits, dim=-1) def forward(self, out0: 'torch.Tensor', out1: 'torch.Tensor'): """Computes the CO2 regularization term for two model outputs. Args: out0: Output projections of the first set of transformed images. out1: Output projections of the second set of transformed images. Returns: The regularization term multiplied by the weight factor alpha. """ out0 = torch.nn.functional.normalize(out0, dim=1) out1 = torch.nn.functional.normalize(out1, dim=1) out1, negatives = super(CO2Regularizer, self).forward(out1, update=True) p = self._get_pseudo_labels(out0, out1, negatives) q = self._get_pseudo_labels(out1, out0, negatives) if self.log_target: div = self.kl_div(p, q) + self.kl_div(q, p) else: div = self.kl_div(p, torch.exp(q)) + self.kl_div(q, torch.exp(p)) return self.alpha * 0.5 * div def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
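# Sketch of the regularizer's algebra (notation mine; values taken from the code
# above): with temperature t = t_consistency, query rows q_i and key rows k_i,
# the pseudo labels are
#   p_i = log_softmax([dot(q_i, k_i), dot(q_i, n_1), ..., dot(q_i, n_m)] / t)
# where the n_j are the other in-batch keys (memory_bank_size=0) or the
# memory-bank entries, and the returned loss is
#   alpha * 0.5 * (KL(p || q) + KL(q || p)).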
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 return buf0, buf1 class MemoryBankModule(torch.nn.Module): """Memory bank implementation This is a parent class to all loss functions implemented by the lightly Python package. This way, any loss can be used with a memory bank if desired. Attributes: size: Number of keys the memory bank can store. If set to 0, memory bank is not used. Examples: >>> class MyLossFunction(MemoryBankModule): >>> >>> def __init__(self, memory_bank_size: int = 2 ** 16): >>> super(MyLossFunction, self).__init__(memory_bank_size) >>> >>> def forward(self, output: torch.Tensor, >>> labels: torch.Tensor = None): >>> >>> output, negatives = super( >>> MyLossFunction, self).forward(output) >>> >>> if negatives is not None: >>> # evaluate loss with negative samples >>> else: >>> # evaluate loss without negative samples """ def __init__(self, size: 'int'=2 ** 16): super(MemoryBankModule, self).__init__() if size < 0: msg = f'Illegal memory bank size {size}, must be non-negative.' raise ValueError(msg) self.size = size self.bank = None self.bank_ptr = None @torch.no_grad() def _init_memory_bank(self, dim: 'int'): """Initialize the memory bank if it's empty Args: dim: The dimension of the keys which are stored in the bank. """ self.bank = torch.randn(dim, self.size) self.bank = torch.nn.functional.normalize(self.bank, dim=0) self.bank_ptr = torch.LongTensor([0]) @torch.no_grad() def _dequeue_and_enqueue(self, batch: 'torch.Tensor'): """Dequeue the oldest batch and add the latest one Args: batch: The latest batch of keys to add to the memory bank. """ batch_size = batch.shape[0] ptr = int(self.bank_ptr) if ptr + batch_size >= self.size: self.bank[:, ptr:] = batch[:self.size - ptr].T.detach() self.bank_ptr[0] = 0 else: self.bank[:, ptr:ptr + batch_size] = batch.T.detach() self.bank_ptr[0] = ptr + batch_size def forward(self, output: 'torch.Tensor', labels: 'torch.Tensor'=None, update: 'bool'=False): """Query memory bank for additional negative samples Args: output: The output of the model. labels: Should always be None, will be ignored. Returns: The output if the memory bank is of size 0, otherwise the output and the entries from the memory bank. """ if self.size == 0: return output, None _, dim = output.shape if self.bank is None: self._init_memory_bank(dim) bank = self.bank.clone().detach() if update: self._dequeue_and_enqueue(output) return output, bank class CO2RegularizerNew(MemoryBankModule): """Implementation of the CO2 regularizer [0] for self-supervised learning. [0] CO2, 2021, https://arxiv.org/abs/2010.02217 Attributes: alpha: Weight of the regularization term. t_consistency: Temperature used during softmax calculations. memory_bank_size: Number of negative samples to store in the memory bank. Use 0 to use the second batch for negative samples. Examples: >>> # initialize loss function for MoCo >>> loss_fn = NTXentLoss(memory_bank_size=4096) >>> >>> # initialize CO2 regularizer >>> co2 = CO2Regularizer(alpha=1.0, memory_bank_size=4096) >>> >>> # generate two random transforms of images >>> t0 = transforms(images) >>> t1 = transforms(images) >>> >>> # feed through the MoCo model >>> out0, out1 = model(t0, t1) >>> >>> # calculate loss and apply regularizer >>> loss = loss_fn(out0, out1) + co2(out0, out1) """ def __init__(self, alpha: 'float'=1, t_consistency: 'float'=0.05, memory_bank_size: 'int'=0): super(CO2RegularizerNew, self).__init__(size=memory_bank_size) self.log_target = True try: self.kl_div = torch.nn.KLDivLoss(reduction='batchmean', log_target=True) except TypeError: self.log_target = False self.kl_div = torch.nn.KLDivLoss(reduction='batchmean') self.t_consistency = t_consistency self.alpha = alpha def _get_pseudo_labels(self, out0: 'torch.Tensor', out1: 'torch.Tensor', negatives: 'torch.Tensor'=None): """Computes the soft pseudo labels across negative samples. Args: out0: Output projections of the first set of transformed images (query). Shape: bsz x n_ftrs out1: Output projections of the second set of transformed images (positive sample). Shape: bsz x n_ftrs negatives: Negative samples to compare against. If this is None, the second batch of images will be used as negative samples. Shape: memory_bank_size x n_ftrs Returns: Log probability that a positive sample will classify each negative sample as the positive sample. Shape: bsz x (bsz - 1) or bsz x memory_bank_size """ batch_size, _ = out0.shape if negatives is None: l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1) l_neg = torch.einsum('nc,ck->nk', [out0, out1.t()]) l_neg = l_neg.masked_select(~torch.eye(batch_size, dtype=bool, device=l_neg.device)).view(batch_size, batch_size - 1) else: l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1) l_neg = torch.einsum('nc,ck->nk', [out0, negatives.clone().detach()]) logits = torch.cat([l_pos, l_neg], dim=1) logits = logits / self.t_consistency return torch.nn.functional.log_softmax(logits, dim=-1) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
EelcoHoogendoorn/lightly
CO2Regularizer
false
5,118
[ "MIT" ]
1
98e0148967738404fa7f45196ec5eabfe00cd22e
https://github.com/EelcoHoogendoorn/lightly/tree/98e0148967738404fa7f45196ec5eabfe00cd22e
import torch class MemoryBankModule(torch.nn.Module): """Memory bank implementation This is a parent class to all loss functions implemented by the lightly Python package. This way, any loss can be used with a memory bank if desired. Attributes: size: Number of keys the memory bank can store. If set to 0, memory bank is not used. Examples: >>> class MyLossFunction(MemoryBankModule): >>> >>> def __init__(self, memory_bank_size: int = 2 ** 16): >>> super().__init__(memory_bank_size) >>> >>> def forward(self, output: torch.Tensor, >>> labels: torch.Tensor = None): >>> >>> output, negatives = super( >>> MyLossFunction, self).forward(output) >>> >>> if negatives is not None: >>> # evaluate loss with negative samples >>> else: >>> # evaluate loss without negative samples """ def __init__(self, size: 'int'=2 ** 16): super().__init__() if size < 0: msg = f'Illegal memory bank size {size}, must be non-negative.' raise ValueError(msg) self.size = size self.bank = None self.bank_ptr = None @torch.no_grad() def _init_memory_bank(self, dim: 'int'): """Initialize the memory bank if it's empty Args: dim: The dimension of the keys which are stored in the bank. """ self.bank = torch.randn(dim, self.size) self.bank = torch.nn.functional.normalize(self.bank, dim=0) self.bank_ptr = torch.LongTensor([0]) @torch.no_grad() def _dequeue_and_enqueue(self, batch: 'torch.Tensor'): """Dequeue the oldest batch and add the latest one Args: batch: The latest batch of keys to add to the memory bank. """ batch_size = batch.shape[0] ptr = int(self.bank_ptr) if ptr + batch_size >= self.size: self.bank[:, ptr:] = batch[:self.size - ptr].T.detach() self.bank_ptr[0] = 0 else: self.bank[:, ptr:ptr + batch_size] = batch.T.detach() self.bank_ptr[0] = ptr + batch_size def forward(self, output: 'torch.Tensor', labels: 'torch.Tensor'=None, update: 'bool'=False): """Query memory bank for additional negative samples Args: output: The output of the model. labels: Should always be None, will be ignored. Returns: The output if the memory bank is of size 0, otherwise the output and the entries from the memory bank. """ if self.size == 0: return output, None _, dim = output.shape if self.bank is None: self._init_memory_bank(dim) bank = self.bank.clone().detach() if update: self._dequeue_and_enqueue(output) return output, bank class Model(MemoryBankModule): """Implementation of the CO2 regularizer [0] for self-supervised learning. [0] CO2, 2021, https://arxiv.org/abs/2010.02217 Attributes: alpha: Weight of the regularization term. t_consistency: Temperature used during softmax calculations. memory_bank_size: Number of negative samples to store in the memory bank. Use 0 to use the second batch for negative samples. Examples: >>> # initialize loss function for MoCo >>> loss_fn = NTXentLoss(memory_bank_size=4096) >>> >>> # initialize CO2 regularizer >>> co2 = CO2Regularizer(alpha=1.0, memory_bank_size=4096) >>> >>> # generate two random transforms of images >>> t0 = transforms(images) >>> t1 = transforms(images) >>> >>> # feed throug # ... truncated (>4000 chars) for memory efficiency
SmallTransformerNet
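Despite the name, the Triton dump that follows matches an image transformation network in the fast-neural-style mold, not an attention model: the assert_size_stride calls and kernel comments show reflection padding, 9x9 and 3x3 convolutions fused with instance norm and ReLU, residual adds, and a nearest-neighbor 2x upsample (the arange/mul index kernels). For orientation only, a hypothetical eager-mode counterpart of the recurring fused block:

import torch

class ConvLayer(torch.nn.Module):
    # Sketch of the pad -> conv -> instance-norm -> ReLU pattern that the
    # fused kernels below implement; e.g. ConvLayer(3, 32, 9) applied to a
    # 4x3x64x64 input reflection-pads it to 72x72, matching the
    # 62208-element grid of triton_poi_fused_reflection_pad2d_0.
    def __init__(self, in_ch: int, out_ch: int, kernel_size: int,
                 stride: int = 1):
        super().__init__()
        self.pad = torch.nn.ReflectionPad2d(kernel_size // 2)
        self.conv = torch.nn.Conv2d(in_ch, out_ch, kernel_size, stride)
        self.norm = torch.nn.InstanceNorm2d(out_ch, affine=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.norm(self.conv(self.pad(x))))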
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xi/cxi3ssslzv45liamqvbt6decmfms5gkzbjn7dtainfaa436qkyw3.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 62208 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 72 x1 = (xindex // 72) % 72 x2 = (xindex // 5184) x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-4) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-4) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zp/czpuakvx3zciuzfmemejrltenkqbzqirfyy2fnfbmrorwkdndz6e.py # Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # instance_norm => add, rsqrt, var_mean # out_1 => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_red_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 32 tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in 
range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce( tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0 ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford( tmp4_mean, tmp4_m2, tmp4_weight, 1 ) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + (x3), tmp4, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/in/ciny2bql3sygecchlvr6rxw73jnhl7dgi3s5w2g2fefaoug53zzz.py # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] # Source node to ATen node mapping: # instance_norm => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {}) triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 % 32), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ii/ciidusl6utkne6h3zmwx3jccsnttcsdc42mtp3vanldcnxv4y7ov.py # Topologically Sorted Source Nodes: [y, out_2], Original ATen: [aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_2 => _unsafe_index_2, _unsafe_index_3 # y => relu # Graph fragment: # %relu : [num_users=1] = 
call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_6, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_6]), kwargs = {}) triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 557568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = (xindex // 66) % 66 x2 = (xindex // 4356) x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ic/cic3dzeenec5pmb2ui2xub4tyfzm2lumrelwp2ceddod4wteiszi.py # Topologically Sorted Source Nodes: [out_3, instance_norm_1, y_1], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # instance_norm_1 => add_2, repeat_2, repeat_3, rsqrt_1, var_mean_1 # out_3 => convolution_1 # y_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, 
%primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {}) # %repeat_3 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_9, [4]), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x0 % 64), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 % 64), None, eviction_policy='evict_last') tmp2 = tl.load(in_out_ptr0 + (r3 + (1024*x0)), None) tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = tl.broadcast_to(tmp5, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.full([1], 1024, tl.int32) tmp11 = 
tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 1024.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp4 - tmp12 tmp24 = tmp23 * tmp22 tmp25 = tmp24 * tmp0 tmp26 = tmp25 + tmp1 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tl.store(out_ptr0 + (x0), tmp0, None) tl.store(out_ptr1 + (x0), tmp1, None) tl.store(in_out_ptr0 + (r3 + (1024*x0)), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp22, None) tl.store(out_ptr3 + (r3 + (1024*x0)), tmp28, None) tl.store(out_ptr2 + (x0), tmp12, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qz/cqzfdwvqtcv7v2mrmehml5qmajbmtq7w5hauph5arji53u5hu3rh.py # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out_4 => _unsafe_index_4, _unsafe_index_5 # Graph fragment: # %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_11, None]), kwargs = {}) # %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_11]), kwargs = {}) triton_poi_fused_reflection_pad2d_5 = async_compile.triton('triton_poi_fused_reflection_pad2d_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = (xindex // 34) % 34 x2 = (xindex // 1156) x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sa/csassjevuloxfdtjhbvssf3x6vih7g643rvk7jcuxhtqhpjdasl2.py # Topologically Sorted Source 
Nodes: [out_5, instance_norm_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # instance_norm_2 => add_4, rsqrt_2, var_mean_2 # out_5 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_6 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_6(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 
(1024*x3)), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp20, None) tl.store(out_ptr0 + (x3), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/b2/cb2o7yjwyg4m73hurg7thah3inbjxxhlpp2ascrkmlfgczwiltf4.py # Topologically Sorted Source Nodes: [instance_norm_2], Original ATen: [aten.repeat] # Source node to ATen node mapping: # instance_norm_2 => repeat_4 # Graph fragment: # %repeat_4 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_12, [4]), kwargs = {}) triton_poi_fused_repeat_7 = async_compile.triton('triton_poi_fused_repeat_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fl/cfl4cjmpc5glftbeodnyc5p7tite3lee2q3i2dxyaktxa3lscoye.py # Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_6 => relu_2 # out_7 => _unsafe_index_6, _unsafe_index_7 # Graph fragment: # %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %sub_11, None]), kwargs = {}) # %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_11]), kwargs = {}) triton_poi_fused_reflection_pad2d_relu_8 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = (xindex // 34) % 34 x2 = (xindex // 1156) x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xx/cxxvy624iqqukvhkqgek4g4e4mobmm4glugtq47uz5dz7a3lmmoe.py # Topologically Sorted Source Nodes: [out_8, out_9, out_10], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add] # Source node to ATen node mapping: # out_10 => add_8 # out_8 => convolution_3 # out_9 => add_6, repeat_6, rsqrt_3, var_mean_3 # Graph fragment: # %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat_6 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_16, [4]), kwargs = {}) # %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %relu_1), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x0 % 64), None, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + (1024*x0)), None) tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp27 = tl.load(in_out_ptr1 + (r3 + (1024*x0)), None) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = tl.broadcast_to(tmp4, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.full([1], 1024, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp11 tmp18 = 1024.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tmp23 * tmp0 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tl.store(out_ptr0 + (x0), tmp0, None) tl.store(in_out_ptr0 + (r3 + (1024*x0)), tmp3, None) tl.store(in_out_ptr1 + (r3 + (1024*x0)), tmp28, None) tl.store(out_ptr3 + (x0), tmp22, None) tl.store(out_ptr1 + (x0), tmp11, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jb/cjb5tfoaglzi7xkxvj4gonqvuiqulf6o3ix2yzanofvpv4uauksl.py # Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # out_15 => convolution_5 # out_16 => add_11, rsqrt_5, var_mean_5 # 
Graph fragment: # %convolution_5 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_11, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {}) # %rsqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_11,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_10 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_10(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None) tl.store(out_ptr2 + (x3), tmp20, None) tl.store(out_ptr0 + (x3), tmp10, None) tl.store(out_ptr1 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/6t/c6tfw2b3ya4gtid5akllte3lypvyv7e5lke32g3d2d2immxexzpv.py # Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange] # Source node to ATen node mapping: # x_in => iota_12 # Graph fragment: # %iota_12 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) triton_poi_fused_arange_11 = async_compile.triton('triton_poi_fused_arange_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_11(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wi/cwivvwhbvwrphtzexppvwxzqlr2mamv45fab7upa3wymzmylszsw.py # Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # x_in => add_14, add_15, convert_element_type, convert_element_type_1, mul_12, mul_13 # Graph fragment: # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_12, 1), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_14, torch.float32), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_15, 0.5), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_13, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_12 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/a6/ca6b3xi52usetx4j54r6syzzi3iw5ufhsj5wfo7stjuoy4wggabb.py # Topologically Sorted Source Nodes: [out_17, x_in, out_18], Original ATen: [aten.add, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_17 => add_13 # out_18 => _unsafe_index_13, _unsafe_index_14 # x_in => _unsafe_index_12 # Graph fragment: # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %add_8), kwargs = {}) # %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_13, [None, None, %unsqueeze_24, %convert_element_type_1]), kwargs = {}) # %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_12, [None, None, %sub_6, None]), kwargs = {}) # %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_13, [None, None, None, %sub_6]), kwargs = {}) triton_poi_fused__unsafe_index_add_reflection_pad2d_13 = async_compile.triton('triton_poi_fused__unsafe_index_add_reflection_pad2d_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_reflection_pad2d_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_reflection_pad2d_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 66) % 66 x0 = xindex % 66 x4 = (xindex // 4356) x2 = (xindex // 4356) % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x4)), xmask, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = 1024.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp11 * tmp17 tmp20 = tmp18 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr6 + (tmp8 + (32*tmp4) + (1024*x4)), xmask, eviction_policy='evict_last') tmp24 = tmp22 + tmp23 tl.store(out_ptr0 + (x7), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yu/cyuvfthsckflubdqn3lemf2ujnq7tbpvcwcyonjdfkirbb5wyaal.py # Topologically Sorted Source Nodes: [y_2, out_20], Original ATen: [aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_20 => _unsafe_index_15, _unsafe_index_16 # y_2 => relu_4 # Graph fragment: # %relu_4 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_13,), kwargs = {}) # %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_15, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_relu_14 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_relu_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 663552 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 72 x1 = (xindex // 72) % 72 x2 = (xindex // 5184) x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-4) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-4) + x1))))) + (4096*x2)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (x3), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/t3/ct3xvdpc3nmxwtw2j2enpzfkkspevnyjkfxii4lmgh2dbevvxirv.py # Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_21 => convolution_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_16, %primals_30, %primals_31, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_15 = async_compile.triton('triton_poi_fused_convolution_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_15', 'mutated_arg_names': 
['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1)) assert_size_stride(primals_3, (32, ), (1, )) assert_size_stride(primals_4, (32, ), (1, )) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, ), (1, )) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (64, ), (1, )) assert_size_stride(primals_13, (64, ), (1, )) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (64, ), (1, )) assert_size_stride(primals_17, (64, ), (1, )) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64, ), (1, )) assert_size_stride(primals_20, (64, ), (1, )) assert_size_stride(primals_21, (64, ), (1, )) assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_23, (64, ), (1, )) assert_size_stride(primals_24, (64, ), (1, )) assert_size_stride(primals_25, (64, ), (1, )) assert_size_stride(primals_26, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_27, (32, ), (1, )) assert_size_stride(primals_28, (32, ), (1, )) assert_size_stride(primals_29, (32, ), (1, )) assert_size_stride(primals_30, (3, 32, 9, 9), (2592, 81, 9, 1)) assert_size_stride(primals_31, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 62208, grid=grid(62208), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf2 = buf1; del buf1 # reuse buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32) buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_red_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 128, 4096, grid=grid(128), stream=stream0) del primals_3 buf3 = empty_strided_cuda((128, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_4, buf3, 128, grid=grid(128), stream=stream0) del primals_4 buf4 = empty_strided_cuda((128, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_5, buf4, 128, grid=grid(128), stream=stream0) del primals_5 buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1), torch.float32) # Topologically Sorted Source Nodes: [y, out_2], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 557568, grid=grid(557568), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = empty_strided_cuda((256, ), (1, ), torch.float32) buf13 = empty_strided_cuda((256, ), (1, ), torch.float32) buf11 = buf10; del buf10 # reuse buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32) buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf15 # reuse buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [out_3, instance_norm_1, y_1], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu] triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4.run(buf11, buf17, primals_8, primals_9, primals_7, buf12, buf13, buf14, buf18, 256, 1024, grid=grid(256), stream=stream0) del primals_7 del primals_8 del primals_9 buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_5.run(buf18, buf19, 295936, grid=grid(295936), stream=stream0) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf21 = buf20; del buf20 # reuse buf24 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32) buf25 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf27 = reinterpret_tensor(buf25, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [out_5, instance_norm_2], Original ATen: 
[aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_6.run(buf21, buf27, primals_11, buf24, 256, 1024, grid=grid(256), stream=stream0) del primals_11 buf22 = empty_strided_cuda((256, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm_2], Original ATen: [aten.repeat] triton_poi_fused_repeat_7.run(primals_12, buf22, 256, grid=grid(256), stream=stream0) del primals_12 buf23 = empty_strided_cuda((256, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm_2], Original ATen: [aten.repeat] triton_poi_fused_repeat_7.run(primals_13, buf23, 256, grid=grid(256), stream=stream0) del primals_13 buf28 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_8.run(buf21, buf24, buf27, buf22, buf23, buf28, 295936, grid=grid(295936), stream=stream0) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf31 = empty_strided_cuda((256, ), (1, ), torch.float32) buf30 = buf29; del buf29 # reuse buf32 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf36 = buf18; del buf18 # reuse buf35 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [out_8, out_9, out_10], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add] triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9.run(buf30, buf36, primals_16, primals_15, primals_17, buf31, buf32, buf35, 256, 1024, grid=grid(256), stream=stream0) del primals_15 del primals_16 del primals_17 buf37 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_5.run(buf36, buf37, 295936, grid=grid(295936), stream=stream0) # Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf39 = buf38; del buf38 # reuse buf42 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32) buf43 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf45 = reinterpret_tensor(buf43, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf43 # reuse # Topologically Sorted Source Nodes: [out_12, instance_norm_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_6.run(buf39, buf45, primals_19, buf42, 256, 1024, grid=grid(256), stream=stream0) del primals_19 buf40 = empty_strided_cuda((256, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm_4], Original ATen: [aten.repeat] triton_poi_fused_repeat_7.run(primals_20, buf40, 256, grid=grid(256), stream=stream0) del primals_20 buf41 = empty_strided_cuda((256, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm_4], Original ATen: [aten.repeat] triton_poi_fused_repeat_7.run(primals_21, buf41, 256, grid=grid(256), 
stream=stream0) del primals_21 buf46 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_13, out_14], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_8.run(buf39, buf42, buf45, buf40, buf41, buf46, 295936, grid=grid(295936), stream=stream0) # Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.convolution] buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf48 = buf47; del buf47 # reuse buf50 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf51 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf53 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_10.run(buf48, primals_23, buf50, buf51, buf53, 256, 1024, grid=grid(256), stream=stream0) del primals_23 buf49 = empty_strided_cuda((256, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.repeat] triton_poi_fused_repeat_7.run(primals_24, buf49, 256, grid=grid(256), stream=stream0) del primals_24 buf54 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange] triton_poi_fused_arange_11.run(buf54, 64, grid=grid(64), stream=stream0) buf55 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_12.run(buf55, 64, grid=grid(64), stream=stream0) buf56 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) # Topologically Sorted Source Nodes: [out_17, x_in, out_18], Original ATen: [aten.add, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_add_reflection_pad2d_13.run(buf55, buf48, buf50, buf51, buf49, primals_25, buf36, buf56, 1115136, grid=grid(1115136), stream=stream0) del buf36 del buf51 del primals_25 # Topologically Sorted Source Nodes: [out_19], Original ATen: [aten.convolution] buf57 = extern_kernels.convolution(buf56, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf58 = buf57; del buf57 # reuse buf61 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32) buf62 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32) buf64 = reinterpret_tensor(buf62, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf62 # reuse # Topologically Sorted Source Nodes: [out_19, instance_norm_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_red_fused__native_batch_norm_legit_convolution_1.run(buf58, buf64, primals_27, buf61, 128, 4096, grid=grid(128), stream=stream0) del primals_27 buf59 = empty_strided_cuda((128, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm_6], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_28, buf59, 128, grid=grid(128), stream=stream0) del primals_28 buf60 = empty_strided_cuda((128, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm_6], Original ATen: 
[aten.repeat] triton_poi_fused_repeat_2.run(primals_29, buf60, 128, grid=grid(128), stream=stream0) del primals_29 buf65 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32) # Topologically Sorted Source Nodes: [y_2, out_20], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_14.run(buf58, buf61, buf64, buf59, buf60, buf65, 663552, grid=grid(663552), stream=stream0) # Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution] buf66 = extern_kernels.convolution(buf65, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf67 = buf66; del buf66 # reuse # Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution] triton_poi_fused_convolution_15.run(buf67, primals_31, 49152, grid=grid(49152), stream=stream0) del primals_31 return (buf67, primals_2, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf19, buf21, buf22, buf23, buf24, buf27, buf28, buf30, buf31, reinterpret_tensor(buf35, (256, ), (1, ), 0), buf37, buf39, buf40, buf41, buf42, buf45, buf46, buf48, buf49, reinterpret_tensor(buf53, (256, ), (1, ), 0), buf54, buf55, buf56, buf58, buf59, buf60, buf61, buf64, buf65, reinterpret_tensor(buf50, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf32, (1, 256, 1, 1), (256, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((3, 32, 9, 9), (2592, 81, 9, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
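# Hedged usage sketch (not part of the Inductor output above): `call` expects
# the 31 tensors in the exact order asserted at its top -- the input image
# first, then each conv weight/bias and instance-norm weight/bias -- and
# returns a tuple whose first element is the stylized image; the remaining
# entries are intermediates kept alive for a potential backward pass.
def run_compiled(flat_params):
    # flat_params: [primals_1, ..., primals_31] on the current CUDA device
    outputs = call(flat_params)
    return outputs[0]  # shape (4, 3, 64, 64)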
import torch import torch.nn.parallel import torch.optim import torch.utils.data import torch.onnx class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class SmallTransformerNet(torch.nn.Module): def __init__(self): super(SmallTransformerNet, self).__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.res6 = ResidualBlock(64) self.res7 = ResidualBlock(64) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, X): y = self.relu(self.in1(self.conv1(X))) y = self.relu(self.in2(self.conv2(y))) y = self.res6(y) y = self.res7(y) y = self.relu(self.in5(self.deconv2(y))) y = self.deconv3(y) return y def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
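# Hedged sanity-check sketch (assumes the classes above are in scope): the
# eager network preserves spatial size end to end -- conv2's stride-2
# downsample is undone by deconv2's nearest-neighbour upsample, and every
# reflection pad exactly offsets its convolution's kernel footprint.
import torch

net = SmallTransformerNet().eval()
with torch.no_grad():
    out = net(torch.rand(4, 3, 64, 64))
assert out.shape == (4, 3, 64, 64)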
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.parallel import torch.optim import torch.utils.data import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 62208 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 32 tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. 
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6_tmp[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 32, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 557568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x2 = xindex // 4356 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 64 tmp0 = tl.load(in_ptr0 + x0 % 64, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0 % 64, None, eviction_policy='evict_last') tmp2 = tl.load(in_out_ptr0 + (r3 + 1024 * x0), None) tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = tl.broadcast_to(tmp5, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.full([1], 1024, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 1024.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp4 - tmp12 tmp24 = tmp23 * tmp22 tmp25 = tmp24 * tmp0 tmp26 = tmp25 + tmp1 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tl.store(out_ptr0 + x0, tmp0, None) tl.store(out_ptr1 + x0, tmp1, None) tl.store(in_out_ptr0 + (r3 + 1024 * x0), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp22, None) tl.store(out_ptr3 + (r3 + 1024 * x0), tmp28, None) 
tl.store(out_ptr2 + x0, tmp12, None) @triton.jit def triton_poi_fused_reflection_pad2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_6(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 64, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x2 = xindex // 1156 x3 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 64 tmp0 = tl.load(in_ptr0 + x0 % 64, None, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 1024 * x0), None) tmp2 = 
tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp27 = tl.load(in_out_ptr1 + (r3 + 1024 * x0), None) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = tl.broadcast_to(tmp4, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.full([1], 1024, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp11 tmp18 = 1024.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tmp23 * tmp0 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tl.store(out_ptr0 + x0, tmp0, None) tl.store(in_out_ptr0 + (r3 + 1024 * x0), tmp3, None) tl.store(in_out_ptr1 + (r3 + 1024 * x0), tmp28, None) tl.store(out_ptr3 + x0, tmp22, None) tl.store(out_ptr1 + x0, tmp11, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_10(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 1024, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 1024.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None) tl.store(out_ptr2 + x3, tmp20, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp15, None) @triton.jit def triton_poi_fused_arange_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_reflection_pad2d_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 66 % 66 x0 = xindex % 66 x4 = xindex // 4356 x2 = xindex // 4356 % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp19 = 
tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x4), xmask, eviction_policy='evict_last') tmp11 = tmp9 - tmp10 tmp13 = 1024.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp11 * tmp17 tmp20 = tmp18 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr6 + (tmp8 + 32 * tmp4 + 1024 * x4), xmask, eviction_policy='evict_last') tmp24 = tmp22 + tmp23 tl.store(out_ptr0 + x7, tmp24, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 72 x1 = xindex // 72 % 72 x2 = xindex // 5184 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64,), (1,)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (64,), (1,)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64,), (1,)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64,), (1,)) assert_size_stride(primals_21, (64,), (1,)) 
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (64,), (1,)) assert_size_stride(primals_25, (64,), (1,)) assert_size_stride(primals_26, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_27, (32,), (1,)) assert_size_stride(primals_28, (32,), (1,)) assert_size_stride(primals_29, (32,), (1,)) assert_size_stride(primals_30, (3, 32, 9, 9), (2592, 81, 9, 1)) assert_size_stride(primals_31, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(62208)](primals_1, buf0, 62208, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32 ) buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch .float32) buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf6 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](buf2 , buf8, primals_3, buf5, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_3 buf3 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_4, buf3, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_4 buf4 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_5, buf4, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_3[grid(557568)](buf2, buf5, buf8, buf3, buf4, buf9, 557568, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = empty_strided_cuda((256,), (1,), torch.float32) buf13 = empty_strided_cuda((256,), (1,), torch.float32) buf11 = buf10 del buf10 buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. float32) buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf15 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_4[ grid(256)](buf11, buf17, primals_8, primals_9, primals_7, buf12, buf13, buf14, buf18, 256, 1024, num_warps=8, num_stages=1) del primals_7 del primals_8 del primals_9 buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_5[grid(295936)](buf18, buf19, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf21 = buf20 del buf20 buf24 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. 
float32) buf25 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf27 = reinterpret_tensor(buf25, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf25 triton_per_fused__native_batch_norm_legit_convolution_6[grid(256)]( buf21, buf27, primals_11, buf24, 256, 1024, num_warps=8, num_stages=1) del primals_11 buf22 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_7[grid(256)](primals_12, buf22, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_12 buf23 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_7[grid(256)](primals_13, buf23, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_13 buf28 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_8[grid(295936)](buf21, buf24, buf27, buf22, buf23, buf28, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf31 = empty_strided_cuda((256,), (1,), torch.float32) buf30 = buf29 del buf29 buf32 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf36 = buf18 del buf18 buf35 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_9[grid (256)](buf30, buf36, primals_16, primals_15, primals_17, buf31, buf32, buf35, 256, 1024, num_warps=8, num_stages=1) del primals_15 del primals_16 del primals_17 buf37 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_5[grid(295936)](buf36, buf37, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf39 = buf38 del buf38 buf42 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch. 
float32) buf43 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf45 = reinterpret_tensor(buf43, (1, 256, 1, 1), (256, 1, 1, 1), 0) del buf43 triton_per_fused__native_batch_norm_legit_convolution_6[grid(256)]( buf39, buf45, primals_19, buf42, 256, 1024, num_warps=8, num_stages=1) del primals_19 buf40 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_7[grid(256)](primals_20, buf40, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_20 buf41 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_7[grid(256)](primals_21, buf41, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_21 buf46 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_8[grid(295936)](buf39, buf42, buf45, buf40, buf41, buf46, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf48 = buf47 del buf47 buf50 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf51 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) buf53 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_10[grid(256)]( buf48, primals_23, buf50, buf51, buf53, 256, 1024, num_warps=8, num_stages=1) del primals_23 buf49 = empty_strided_cuda((256,), (1,), torch.float32) triton_poi_fused_repeat_7[grid(256)](primals_24, buf49, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_24 buf54 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_11[grid(64)](buf54, 64, XBLOCK=64, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_12[grid(64)](buf55, 64, XBLOCK=64, num_warps=1, num_stages=1) buf56 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_add_reflection_pad2d_13[grid(1115136)]( buf55, buf48, buf50, buf51, buf49, primals_25, buf36, buf56, 1115136, XBLOCK=512, num_warps=8, num_stages=1) del buf36 del buf51 del primals_25 buf57 = extern_kernels.convolution(buf56, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf58 = buf57 del buf57 buf61 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch. 
float32) buf62 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32) buf64 = reinterpret_tensor(buf62, (1, 128, 1, 1), (128, 1, 1, 1), 0) del buf62 triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)]( buf58, buf64, primals_27, buf61, 128, 4096, XBLOCK=1, RBLOCK= 2048, num_warps=16, num_stages=1) del primals_27 buf59 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_28, buf59, 128, XBLOCK =128, num_warps=4, num_stages=1) del primals_28 buf60 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(128)](primals_29, buf60, 128, XBLOCK =128, num_warps=4, num_stages=1) del primals_29 buf65 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_14[grid(663552)](buf58, buf61, buf64, buf59, buf60, buf65, 663552, XBLOCK=512, num_warps=8, num_stages=1) buf66 = extern_kernels.convolution(buf65, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf67 = buf66 del buf66 triton_poi_fused_convolution_15[grid(49152)](buf67, primals_31, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_31 return (buf67, primals_2, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf19, buf21, buf22, buf23, buf24, buf27, buf28, buf30, buf31, reinterpret_tensor(buf35, (256,), (1,), 0), buf37, buf39, buf40, buf41, buf42, buf45, buf46, buf48, buf49, reinterpret_tensor(buf53, (256,), (1,), 0), buf54, buf55, buf56, buf58, buf59, buf60, buf61, buf64, buf65, reinterpret_tensor(buf50, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf32, (1, 256, 1, 1), (256, 1, 1, 1), 0)) class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. 
ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class SmallTransformerNetNew(torch.nn.Module): def __init__(self): super(SmallTransformerNetNew, self).__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.res6 = ResidualBlock(64) self.res7 = ResidualBlock(64) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.in1.weight primals_5 = self.in1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.in2.weight primals_9 = self.in2.bias primals_10 = self.res6.conv1.conv2d.weight primals_11 = self.res6.conv1.conv2d.bias primals_12 = self.res6.in1.weight primals_13 = self.res6.in1.bias primals_14 = self.res6.conv2.conv2d.weight primals_15 = self.res6.conv2.conv2d.bias primals_16 = self.res6.in2.weight primals_17 = self.res6.in2.bias primals_18 = self.res7.conv1.conv2d.weight primals_19 = self.res7.conv1.conv2d.bias primals_20 = self.res7.in1.weight primals_21 = self.res7.in1.bias primals_22 = self.res7.conv2.conv2d.weight primals_23 = self.res7.conv2.conv2d.bias primals_24 = self.res7.in2.weight primals_25 = self.res7.in2.bias primals_26 = self.deconv2.conv2d.weight primals_27 = self.deconv2.conv2d.bias primals_28 = self.in5.weight primals_29 = self.in5.bias primals_30 = self.deconv3.conv2d.weight primals_31 = self.deconv3.conv2d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31]) return output[0]
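# Hedged equivalence sketch (assumptions: a CUDA device is available, and the
# eager SmallTransformerNet from the companion source is in scope): both
# variants use identical submodule names, so one state_dict loads into the
# other, and the fused Triton path should then agree with eager execution up
# to float32 tolerance.
import torch

if torch.cuda.is_available():
    eager = SmallTransformerNet().cuda().eval()
    fused = SmallTransformerNetNew().cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)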
CorentinChauvin/style-transfer-KD
SmallTransformerNet
false
5119
[ "MIT" ]
1
87bcb2963dbb8d09faf94c74a744f358cafe5427
https://github.com/CorentinChauvin/style-transfer-KD/tree/87bcb2963dbb8d09faf94c74a744f358cafe5427
import torch import torch.nn.parallel import torch.optim import torch.utils.data import torch.onnx class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super().__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super().__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out class Model(torch.nn.Module): def __init__(self): super().__init__() self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.res6 = ResidualBlock(64) self.res7 = ResidualBlock(64) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) self.relu = torch.nn.ReLU() def forward(self, X): y = self.relu(self.in1(self.conv1(X))) y = self.relu(self.in2(self.conv2(y))) y = self.res6(y) y = self.res7(y) y = self.relu(self.in5(self.deconv2(y))) y = self.deconv3(y) return y def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return []
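# Hedged reproduction sketch (assumptions: a recent PyTorch build and a CUDA
# device): Triton listings like the ones in this record are what TorchInductor
# emits when the eager Model above goes through torch.compile; setting the
# TORCH_COMPILE_DEBUG=1 environment variable dumps the generated kernels.
import torch

if torch.cuda.is_available():
    model = Model().cuda().eval()
    compiled = torch.compile(model)      # TorchInductor backend by default
    x = get_inputs()[0].cuda()
    with torch.no_grad():
        _ = compiled(x)                  # first call triggers codegen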
LabelPropagation
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/bk/cbkxsp72qxe4rkxwcicmzvtskfizpwnd56mg5cfatr7zr2p42ofj.py # Topologically Sorted Source Nodes: [mul_1, res, out, out_1], Original ATen: [aten.mul, aten.add, aten.clamp] # Source node to ATen node mapping: # mul_1 => mul_1 # out => add # out_1 => clamp_max, clamp_min # res => mul # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 0.5), kwargs = {}) # %mul : [num_users=50] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {}) triton_poi_fused_add_clamp_mul_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 1.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(in_out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul_1, res, out, out_1], Original ATen: [aten.mul, aten.add, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_mul_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [res, mul_2, out_2, out_3], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf3, arg0_1, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [res, mul_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf5, arg0_1, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [res, mul_4, out_6, out_7], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf7, arg0_1, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = 
reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [res, mul_5, out_8, out_9], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf9, arg0_1, 256, grid=grid(256), stream=stream0) buf10 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [res, mul_6, out_10, out_11], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf11, arg0_1, 256, grid=grid(256), stream=stream0) buf12 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [res, mul_7, out_12, out_13], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf13, arg0_1, 256, grid=grid(256), stream=stream0) buf14 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [res, mul_8, out_14, out_15], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf15, arg0_1, 256, grid=grid(256), stream=stream0) buf16 = reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [res, mul_9, out_16, out_17], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf17, arg0_1, 256, grid=grid(256), stream=stream0) buf18 = reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [res, mul_10, out_18, out_19], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf19, arg0_1, 256, grid=grid(256), stream=stream0) buf20 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20) buf21 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 
0); del buf20 # reuse # Topologically Sorted Source Nodes: [res, mul_11, out_20, out_21], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf21, arg0_1, 256, grid=grid(256), stream=stream0) buf22 = reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), out=buf22) buf23 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf22 # reuse # Topologically Sorted Source Nodes: [res, mul_12, out_22, out_23], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf23, arg0_1, 256, grid=grid(256), stream=stream0) buf24 = reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0); del buf21 # reuse # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24) buf25 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [res, mul_13, out_24, out_25], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf25, arg0_1, 256, grid=grid(256), stream=stream0) buf26 = reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf26 # reuse # Topologically Sorted Source Nodes: [res, mul_14, out_26, out_27], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf27, arg0_1, 256, grid=grid(256), stream=stream0) buf28 = reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), out=buf28) buf29 = reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf28 # reuse # Topologically Sorted Source Nodes: [res, mul_15, out_28, out_29], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf29, arg0_1, 256, grid=grid(256), stream=stream0) buf30 = reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0); del buf27 # reuse # Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf30 # reuse # Topologically Sorted Source Nodes: [res, mul_16, out_30, out_31], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf31, arg0_1, 256, grid=grid(256), stream=stream0) buf32 = reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0); del buf29 # reuse # Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0), out=buf32) buf33 = reinterpret_tensor(buf32, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf32 # reuse # Topologically 
Sorted Source Nodes: [res, mul_17, out_32, out_33], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf33, arg0_1, 256, grid=grid(256), stream=stream0) buf34 = reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0); del buf31 # reuse # Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0), out=buf34) buf35 = reinterpret_tensor(buf34, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf34 # reuse # Topologically Sorted Source Nodes: [res, mul_18, out_34, out_35], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf35, arg0_1, 256, grid=grid(256), stream=stream0) buf36 = reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0); del buf33 # reuse # Topologically Sorted Source Nodes: [matmul_18], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36) buf37 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf36 # reuse # Topologically Sorted Source Nodes: [res, mul_19, out_36, out_37], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf37, arg0_1, 256, grid=grid(256), stream=stream0) buf38 = reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0); del buf35 # reuse # Topologically Sorted Source Nodes: [matmul_19], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0), out=buf38) buf39 = reinterpret_tensor(buf38, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf38 # reuse # Topologically Sorted Source Nodes: [res, mul_20, out_38, out_39], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf39, arg0_1, 256, grid=grid(256), stream=stream0) buf40 = reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0); del buf37 # reuse # Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0), out=buf40) buf41 = reinterpret_tensor(buf40, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf40 # reuse # Topologically Sorted Source Nodes: [res, mul_21, out_40, out_41], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf41, arg0_1, 256, grid=grid(256), stream=stream0) buf42 = reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0); del buf39 # reuse # Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0), out=buf42) buf43 = reinterpret_tensor(buf42, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf42 # reuse # Topologically Sorted Source Nodes: [res, mul_22, out_42, out_43], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf43, arg0_1, 256, grid=grid(256), stream=stream0) buf44 = reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0); del buf41 # reuse # Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0), out=buf44) buf45 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf44 # reuse # Topologically Sorted Source Nodes: [res, mul_23, 
out_44, out_45], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf45, arg0_1, 256, grid=grid(256), stream=stream0) buf46 = reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0); del buf43 # reuse # Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0), out=buf46) buf47 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf46 # reuse # Topologically Sorted Source Nodes: [res, mul_24, out_46, out_47], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf47, arg0_1, 256, grid=grid(256), stream=stream0) buf48 = reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0); del buf45 # reuse # Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0), out=buf48) buf49 = reinterpret_tensor(buf48, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf48 # reuse # Topologically Sorted Source Nodes: [res, mul_25, out_48, out_49], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf49, arg0_1, 256, grid=grid(256), stream=stream0) buf50 = reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0); del buf47 # reuse # Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0), out=buf50) buf51 = reinterpret_tensor(buf50, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf50 # reuse # Topologically Sorted Source Nodes: [res, mul_26, out_50, out_51], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf51, arg0_1, 256, grid=grid(256), stream=stream0) buf52 = reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0); del buf49 # reuse # Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0), out=buf52) buf53 = reinterpret_tensor(buf52, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf52 # reuse # Topologically Sorted Source Nodes: [res, mul_27, out_52, out_53], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf53, arg0_1, 256, grid=grid(256), stream=stream0) buf54 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0); del buf51 # reuse # Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0), out=buf54) buf55 = reinterpret_tensor(buf54, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf54 # reuse # Topologically Sorted Source Nodes: [res, mul_28, out_54, out_55], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf55, arg0_1, 256, grid=grid(256), stream=stream0) buf56 = reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0); del buf53 # reuse # Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0), out=buf56) buf57 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf56 # reuse # Topologically Sorted Source Nodes: [res, mul_29, out_56, out_57], Original ATen: 
[aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf57, arg0_1, 256, grid=grid(256), stream=stream0) buf58 = reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0); del buf55 # reuse # Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0), out=buf58) buf59 = reinterpret_tensor(buf58, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf58 # reuse # Topologically Sorted Source Nodes: [res, mul_30, out_58, out_59], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf59, arg0_1, 256, grid=grid(256), stream=stream0) buf60 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0); del buf57 # reuse # Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0), out=buf60) buf61 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf60 # reuse # Topologically Sorted Source Nodes: [res, mul_31, out_60, out_61], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf61, arg0_1, 256, grid=grid(256), stream=stream0) buf62 = reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0); del buf59 # reuse # Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0), out=buf62) buf63 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf62 # reuse # Topologically Sorted Source Nodes: [res, mul_32, out_62, out_63], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf63, arg0_1, 256, grid=grid(256), stream=stream0) buf64 = reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0); del buf61 # reuse # Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0), out=buf64) buf65 = reinterpret_tensor(buf64, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf64 # reuse # Topologically Sorted Source Nodes: [res, mul_33, out_64, out_65], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf65, arg0_1, 256, grid=grid(256), stream=stream0) buf66 = reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0); del buf63 # reuse # Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0), out=buf66) buf67 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf66 # reuse # Topologically Sorted Source Nodes: [res, mul_34, out_66, out_67], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf67, arg0_1, 256, grid=grid(256), stream=stream0) buf68 = reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0); del buf65 # reuse # Topologically Sorted Source Nodes: [matmul_34], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0), out=buf68) buf69 = reinterpret_tensor(buf68, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf68 # reuse # Topologically Sorted Source Nodes: [res, mul_35, out_68, out_69], Original ATen: [aten.mul, aten.add, aten.clamp] 
triton_poi_fused_add_clamp_mul_0.run(buf69, arg0_1, 256, grid=grid(256), stream=stream0) buf70 = reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0); del buf67 # reuse # Topologically Sorted Source Nodes: [matmul_35], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0), out=buf70) buf71 = reinterpret_tensor(buf70, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf70 # reuse # Topologically Sorted Source Nodes: [res, mul_36, out_70, out_71], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf71, arg0_1, 256, grid=grid(256), stream=stream0) buf72 = reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0); del buf69 # reuse # Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0), out=buf72) buf73 = reinterpret_tensor(buf72, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf72 # reuse # Topologically Sorted Source Nodes: [res, mul_37, out_72, out_73], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf73, arg0_1, 256, grid=grid(256), stream=stream0) buf74 = reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0); del buf71 # reuse # Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0), out=buf74) buf75 = reinterpret_tensor(buf74, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf74 # reuse # Topologically Sorted Source Nodes: [res, mul_38, out_74, out_75], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf75, arg0_1, 256, grid=grid(256), stream=stream0) buf76 = reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0); del buf73 # reuse # Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0), out=buf76) buf77 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf76 # reuse # Topologically Sorted Source Nodes: [res, mul_39, out_76, out_77], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf77, arg0_1, 256, grid=grid(256), stream=stream0) buf78 = reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0); del buf75 # reuse # Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0), out=buf78) buf79 = reinterpret_tensor(buf78, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf78 # reuse # Topologically Sorted Source Nodes: [res, mul_40, out_78, out_79], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf79, arg0_1, 256, grid=grid(256), stream=stream0) buf80 = reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0); del buf77 # reuse # Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0), out=buf80) buf81 = reinterpret_tensor(buf80, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf80 # reuse # Topologically Sorted Source Nodes: [res, mul_41, out_80, out_81], Original ATen: [aten.mul, aten.add, aten.clamp] 
triton_poi_fused_add_clamp_mul_0.run(buf81, arg0_1, 256, grid=grid(256), stream=stream0) buf82 = reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0); del buf79 # reuse # Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0), out=buf82) buf83 = reinterpret_tensor(buf82, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf82 # reuse # Topologically Sorted Source Nodes: [res, mul_42, out_82, out_83], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf83, arg0_1, 256, grid=grid(256), stream=stream0) buf84 = reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0); del buf81 # reuse # Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0), out=buf84) buf85 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf84 # reuse # Topologically Sorted Source Nodes: [res, mul_43, out_84, out_85], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf85, arg0_1, 256, grid=grid(256), stream=stream0) buf86 = reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0); del buf83 # reuse # Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0), out=buf86) buf87 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf86 # reuse # Topologically Sorted Source Nodes: [res, mul_44, out_86, out_87], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf87, arg0_1, 256, grid=grid(256), stream=stream0) buf88 = reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0); del buf85 # reuse # Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0), out=buf88) buf89 = reinterpret_tensor(buf88, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf88 # reuse # Topologically Sorted Source Nodes: [res, mul_45, out_88, out_89], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf89, arg0_1, 256, grid=grid(256), stream=stream0) buf90 = reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0); del buf87 # reuse # Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0), out=buf90) buf91 = reinterpret_tensor(buf90, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf90 # reuse # Topologically Sorted Source Nodes: [res, mul_46, out_90, out_91], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf91, arg0_1, 256, grid=grid(256), stream=stream0) buf92 = reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0); del buf89 # reuse # Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0), out=buf92) buf93 = reinterpret_tensor(buf92, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf92 # reuse # Topologically Sorted Source Nodes: [res, mul_47, out_92, out_93], Original ATen: [aten.mul, aten.add, aten.clamp] 
triton_poi_fused_add_clamp_mul_0.run(buf93, arg0_1, 256, grid=grid(256), stream=stream0) buf94 = reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0); del buf91 # reuse # Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0), out=buf94) buf95 = reinterpret_tensor(buf94, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf94 # reuse # Topologically Sorted Source Nodes: [res, mul_48, out_94, out_95], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf95, arg0_1, 256, grid=grid(256), stream=stream0) buf96 = reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0); del buf93 # reuse # Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0), out=buf96) buf97 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf96 # reuse # Topologically Sorted Source Nodes: [res, mul_49, out_96, out_97], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf97, arg0_1, 256, grid=grid(256), stream=stream0) buf98 = reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0); del buf95 # reuse # Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf97, (16, 4, 4), (16, 4, 1), 0), out=buf98) del arg1_1 del buf97 buf99 = reinterpret_tensor(buf98, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf98 # reuse # Topologically Sorted Source Nodes: [res, mul_50, out_98, out_99], Original ATen: [aten.mul, aten.add, aten.clamp] triton_poi_fused_add_clamp_mul_0.run(buf99, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf99, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class LabelPropagation(nn.Module):
    """label propagation model adapted from https://github.com/CUAI/CorrectAndSmooth

    `"Learning from Labeled and Unlabeled Data with Label Propagation"
    <http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf>`_ paper
    """

    def __init__(self, num_layers=50, alpha=0.5, residual=True):
        super().__init__()
        self.num_layers = num_layers
        self.alpha = alpha
        self.residual = residual

    @torch.no_grad()
    def forward(self, y, adj, mask=None):
        if y.dtype == torch.long:
            y = F.one_hot(y.view(-1)).float()
        out = y
        if mask is not None:
            out = torch.zeros_like(y)
            out[mask] = y[mask]
        if self.residual:
            res = (1 - self.alpha) * out
        else:
            res = out.clone()
        for _ in range(self.num_layers):
            out = self.alpha * (adj @ out) + res
            out = torch.clamp(out, 0, 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
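Since the forward pass is just the fixed-point iteration out <- alpha * (adj @ out) + (1 - alpha) * y followed by a clamp, a minimal usage sketch (variable names here are illustrative, not from the source repo) looks like this:

# Minimal usage sketch: propagate soft labels over a dense "adjacency"
# tensor, matching the shapes produced by get_inputs() above.
import torch

lp = LabelPropagation(num_layers=50, alpha=0.5, residual=True)
y, adj = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = lp(y, adj)  # each step: out = 0.5 * (adj @ out) + 0.5 * y, then clamp to [0, 1]
assert out.shape == y.shape and out.min() >= 0 and out.max() <= 1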
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 1.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(in_out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_clamp_mul_0[grid(256)](buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out =buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_clamp_mul_0[grid(256)](buf3, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out =buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_add_clamp_mul_0[grid(256)](buf5, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out =buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_add_clamp_mul_0[grid(256)](buf7, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out =buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused_add_clamp_mul_0[grid(256)](buf9, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0) del buf7 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out =buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 triton_poi_fused_add_clamp_mul_0[grid(256)](buf11, arg0_1, 256, XBLOCK=128, num_warps=4, 
num_stages=1) buf12 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0) del buf9 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf12 triton_poi_fused_add_clamp_mul_0[grid(256)](buf13, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0) del buf11 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf14 triton_poi_fused_add_clamp_mul_0[grid(256)](buf15, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0) del buf13 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf16 triton_poi_fused_add_clamp_mul_0[grid(256)](buf17, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf18 = reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0) del buf15 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf18 triton_poi_fused_add_clamp_mul_0[grid(256)](buf19, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf20 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0) del buf17 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20) buf21 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf20 triton_poi_fused_add_clamp_mul_0[grid(256)](buf21, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf22 = reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0) del buf19 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), out=buf22) buf23 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf22 triton_poi_fused_add_clamp_mul_0[grid(256)](buf23, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf24 = reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0) del buf21 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24) buf25 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf24 triton_poi_fused_add_clamp_mul_0[grid(256)](buf25, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf26 = reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0) del buf23 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf26 triton_poi_fused_add_clamp_mul_0[grid(256)](buf27, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0) del buf25 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), out=buf28) buf29 = reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf28 triton_poi_fused_add_clamp_mul_0[grid(256)](buf29, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf30 = 
reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0) del buf27 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf30 triton_poi_fused_add_clamp_mul_0[grid(256)](buf31, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf32 = reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0) del buf29 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0), out=buf32) buf33 = reinterpret_tensor(buf32, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf32 triton_poi_fused_add_clamp_mul_0[grid(256)](buf33, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf34 = reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0) del buf31 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0), out=buf34) buf35 = reinterpret_tensor(buf34, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf34 triton_poi_fused_add_clamp_mul_0[grid(256)](buf35, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf36 = reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0) del buf33 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36) buf37 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf36 triton_poi_fused_add_clamp_mul_0[grid(256)](buf37, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf38 = reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0) del buf35 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0), out=buf38) buf39 = reinterpret_tensor(buf38, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf38 triton_poi_fused_add_clamp_mul_0[grid(256)](buf39, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf40 = reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0) del buf37 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0), out=buf40) buf41 = reinterpret_tensor(buf40, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf40 triton_poi_fused_add_clamp_mul_0[grid(256)](buf41, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf42 = reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0) del buf39 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0), out=buf42) buf43 = reinterpret_tensor(buf42, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf42 triton_poi_fused_add_clamp_mul_0[grid(256)](buf43, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf44 = reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0) del buf41 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0), out=buf44) buf45 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf44 triton_poi_fused_add_clamp_mul_0[grid(256)](buf45, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf46 = reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0) del buf43 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0), out=buf46) buf47 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf46 triton_poi_fused_add_clamp_mul_0[grid(256)](buf47, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf48 = reinterpret_tensor(buf45, (16, 4, 
4), (16, 4, 1), 0) del buf45 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0), out=buf48) buf49 = reinterpret_tensor(buf48, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf48 triton_poi_fused_add_clamp_mul_0[grid(256)](buf49, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf50 = reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0) del buf47 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0), out=buf50) buf51 = reinterpret_tensor(buf50, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf50 triton_poi_fused_add_clamp_mul_0[grid(256)](buf51, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf52 = reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0) del buf49 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0), out=buf52) buf53 = reinterpret_tensor(buf52, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf52 triton_poi_fused_add_clamp_mul_0[grid(256)](buf53, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf54 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0) del buf51 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0), out=buf54) buf55 = reinterpret_tensor(buf54, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf54 triton_poi_fused_add_clamp_mul_0[grid(256)](buf55, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf56 = reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0) del buf53 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0), out=buf56) buf57 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf56 triton_poi_fused_add_clamp_mul_0[grid(256)](buf57, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf58 = reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0) del buf55 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0), out=buf58) buf59 = reinterpret_tensor(buf58, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf58 triton_poi_fused_add_clamp_mul_0[grid(256)](buf59, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf60 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0) del buf57 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0), out=buf60) buf61 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf60 triton_poi_fused_add_clamp_mul_0[grid(256)](buf61, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf62 = reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0) del buf59 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0), out=buf62) buf63 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf62 triton_poi_fused_add_clamp_mul_0[grid(256)](buf63, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf64 = reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0) del buf61 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0), out=buf64) buf65 = reinterpret_tensor(buf64, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf64 triton_poi_fused_add_clamp_mul_0[grid(256)](buf65, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf66 = reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0) del buf63 
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0), out=buf66) buf67 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf66 triton_poi_fused_add_clamp_mul_0[grid(256)](buf67, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf68 = reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0) del buf65 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0), out=buf68) buf69 = reinterpret_tensor(buf68, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf68 triton_poi_fused_add_clamp_mul_0[grid(256)](buf69, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf70 = reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0) del buf67 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0), out=buf70) buf71 = reinterpret_tensor(buf70, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf70 triton_poi_fused_add_clamp_mul_0[grid(256)](buf71, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf72 = reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0) del buf69 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0), out=buf72) buf73 = reinterpret_tensor(buf72, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf72 triton_poi_fused_add_clamp_mul_0[grid(256)](buf73, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf74 = reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0) del buf71 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0), out=buf74) buf75 = reinterpret_tensor(buf74, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf74 triton_poi_fused_add_clamp_mul_0[grid(256)](buf75, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf76 = reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0) del buf73 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0), out=buf76) buf77 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf76 triton_poi_fused_add_clamp_mul_0[grid(256)](buf77, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf78 = reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0) del buf75 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0), out=buf78) buf79 = reinterpret_tensor(buf78, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf78 triton_poi_fused_add_clamp_mul_0[grid(256)](buf79, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf80 = reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0) del buf77 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0), out=buf80) buf81 = reinterpret_tensor(buf80, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf80 triton_poi_fused_add_clamp_mul_0[grid(256)](buf81, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf82 = reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0) del buf79 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0), out=buf82) buf83 = reinterpret_tensor(buf82, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf82 triton_poi_fused_add_clamp_mul_0[grid(256)](buf83, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf84 = reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0) del buf81 
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0), out=buf84) buf85 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf84 triton_poi_fused_add_clamp_mul_0[grid(256)](buf85, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf86 = reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0) del buf83 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0), out=buf86) buf87 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf86 triton_poi_fused_add_clamp_mul_0[grid(256)](buf87, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf88 = reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0) del buf85 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0), out=buf88) buf89 = reinterpret_tensor(buf88, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf88 triton_poi_fused_add_clamp_mul_0[grid(256)](buf89, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf90 = reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0) del buf87 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0), out=buf90) buf91 = reinterpret_tensor(buf90, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf90 triton_poi_fused_add_clamp_mul_0[grid(256)](buf91, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf92 = reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0) del buf89 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0), out=buf92) buf93 = reinterpret_tensor(buf92, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf92 triton_poi_fused_add_clamp_mul_0[grid(256)](buf93, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf94 = reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0) del buf91 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0), out=buf94) buf95 = reinterpret_tensor(buf94, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf94 triton_poi_fused_add_clamp_mul_0[grid(256)](buf95, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf96 = reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0) del buf93 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0), out=buf96) buf97 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf96 triton_poi_fused_add_clamp_mul_0[grid(256)](buf97, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf98 = reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0) del buf95 extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf97, (16, 4, 4), (16, 4, 1), 0), out=buf98) del arg1_1 del buf97 buf99 = reinterpret_tensor(buf98, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf98 triton_poi_fused_add_clamp_mul_0[grid(256)](buf99, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf99, class LabelPropagationNew(nn.Module): """label propagation model adapted from https://github.com/CUAI/CorrectAndSmooth `"Learning from Labeled and Unlabeled Datawith Label Propagation" <http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf>`_ paper """ def __init__(self, num_layers=50, alpha=0.5, residual=True): super().__init__() self.num_layers = num_layers self.alpha = alpha self.residual = residual def forward(self, input_0, input_1): 
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
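Because LabelPropagation.forward runs under @torch.no_grad() with constant num_layers=50 and alpha=0.5, Inductor unrolls all 50 iterations at trace time: each step lowers to one extern_kernels.bmm (the adj @ out) plus one in-place launch of triton_poi_fused_add_clamp_mul_0 (which fuses 0.5 * bmm_result + 0.5 * y and the clamp to [0, 1]), with the two working buffers reused in a ping-pong pattern via reinterpret_tensor. A quick equivalence check between the eager and compiled modules might look like this (a sketch; assumes a CUDA device is available):

# Sketch of an eager-vs-compiled sanity check; tolerances are the
# torch.testing defaults, which the fused path should satisfy.
import torch

y = torch.rand(4, 4, 4, 4, device='cuda')
adj = torch.rand(4, 4, 4, 4, device='cuda')
eager = LabelPropagation()(y, adj)
compiled = LabelPropagationNew()(y, adj)
torch.testing.assert_close(eager, compiled)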
EdisonLeeeee/Graphgallery
LabelPropagation
false
5,120
[ "MIT" ]
1
8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
import torch
import torch.nn.functional as F
import torch.nn as nn


class Model(nn.Module):
    """label propagation model adapted from https://github.com/CUAI/CorrectAndSmooth

    `"Learning from Labeled and Unlabeled Data with Label Propagation"
    <http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf>`_ paper
    """

    def __init__(self, num_layers=50, alpha=0.5, residual=True):
        super().__init__()
        self.num_layers = num_layers
        self.alpha = alpha
        self.residual = residual

    @torch.no_grad()
    def forward(self, y, adj, mask=None):
        if y.dtype == torch.long:
            y = F.one_hot(y.view(-1)).float()
        out = y
        if mask is not None:
            out = torch.zeros_like(y)
            out[mask] = y[mask]
        if self.residual:
            res = (1 - self.alpha) * out
        else:
            res = out.clone()
        for _ in range(self.num_layers):
            out = self.alpha * (adj @ out) + res
            out = torch.clamp(out, 0, 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
Foo
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/74/c74yuox45avk3dyefqdkvkdygfzym6suwnjtqzkgr6lapmzwsqve.py # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 
(x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed


class Foo(torch.nn.Module):

    def __init__(self, size):
        super(Foo, self).__init__()
        self.n = torch.nn.Parameter(torch.ones(size))
        self.m = torch.nn.Parameter(torch.ones(size))

    def forward(self, input):
        return self.n * input + self.m


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2


class FooNew(torch.nn.Module):

    def __init__(self, size):
        super(FooNew, self).__init__()
        self.n = torch.nn.Parameter(torch.ones(size))
        self.m = torch.nn.Parameter(torch.ones(size))

    def forward(self, input_0):
        primals_1 = self.n
        primals_3 = self.m
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
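The fused kernel above flattens the contiguous (4, 4, 4, 4) input to 256 elements and broadcasts the two size-4 parameters over the leading dimensions by indexing with xindex % 4, which is valid because the last dimension has stride 1 and extent 4. A CPU-runnable sketch of exactly that indexing in plain PyTorch:

# Sketch: what the fused kernel computes, expressed eagerly.
# xindex % 4 picks the last-dimension coordinate, so n and m broadcast
# over the leading (4, 4, 4) dims exactly like the reference below.
import torch

n = torch.ones(4)
m = torch.ones(4)
x = torch.rand(4, 4, 4, 4)
ref = n * x + m                          # eager Foo.forward
flat = x.reshape(-1)
idx = torch.arange(flat.numel())
fused = n[idx % 4] * flat + m[idx % 4]   # the kernel's flattened indexing
torch.testing.assert_close(ref.reshape(-1), fused)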
Ella77/tacotron2_multispeaker_pytorch
Foo
false
5,121
[ "BSD-3-Clause" ]
1
859eab0a8e3bd7545e623ce47fe1563702d38442
https://github.com/Ella77/tacotron2_multispeaker_pytorch/tree/859eab0a8e3bd7545e623ce47fe1563702d38442
import torch
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed


class Model(torch.nn.Module):

    def __init__(self, size):
        super().__init__()
        self.n = torch.nn.Parameter(torch.ones(size))
        self.m = torch.nn.Parameter(torch.ones(size))

    def forward(self, input):
        return self.n * input + self.m


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
LinearZeros
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/n3/cn3xefa3yajkd7wdkmubvtixzl5wmbx427xsxd2dcdu4gxfgqriv.py # Topologically Sorted Source Nodes: [mul, exp, output_1], Original ATen: [aten.mul, aten.exp, aten.view] # Source node to ATen node mapping: # exp => exp # mul => mul # output_1 => mul_1, view_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3.0), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %exp), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) triton_poi_fused_exp_mul_view_0 = async_compile.triton('triton_poi_fused_exp_mul_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_exp_mul_view_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = 3.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tl.store(in_out_ptr0 + (x4), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, exp, output_1], Original ATen: [aten.mul, aten.exp, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_exp_mul_view_0.run(buf2, buf0, primals_4, 256, grid=grid(256), stream=stream0) return (buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
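For reference, the fused kernel above is purely elementwise: it scales each addmm output by exp(logs * 3.0), broadcasting the per-feature logs vector across the flattened input via x0 = xindex % 4. A minimal eager sketch of the same arithmetic, assuming the (4, 4, 4, 4) shapes used by the generated code:

import torch

# Sketch of what triton_poi_fused_exp_mul_view_0 computes per element.
linear_out = torch.randn(4, 4, 4, 4)      # the addmm result (buf0), reshaped
logs = torch.zeros(4)                     # zero-initialized, as in LinearZeros
out = linear_out * torch.exp(logs * 3.0)  # tmp5 = tmp0 * exp(tmp1 * 3.0)
assert torch.allclose(out, linear_out)    # exp(0) == 1 for fresh parameters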
import torch import torch.nn as nn class LinearZeros(nn.Linear): def __init__(self, in_features, out_features, bias=True, logscale_factor=3.0): """ Linear layer with zero initialization :param in_features: size of each input sample :type in_features: int :param out_features: size of each output sample :type out_features: int :param bias: whether to learn an additive bias. :type bias: bool :param logscale_factor: factor of logscale :type logscale_factor: float """ super().__init__(in_features, out_features, bias) self.logscale_factor = logscale_factor self.weight.data.zero_() self.bias.data.zero_() self.register_parameter('logs', nn.Parameter(torch.zeros(out_features)) ) def forward(self, x): """ Forward linear zero layer :param x: input :type x: torch.Tensor :return: output :rtype: torch.Tensor """ output = super().forward(x) output *= torch.exp(self.logs * self.logscale_factor) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_mul_view_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = 3.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tl.store(in_out_ptr0 + x4, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_exp_mul_view_0[grid(256)](buf2, buf0, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class LinearZerosNew(nn.Linear): def __init__(self, in_features, out_features, bias=True, logscale_factor=3.0): """ Linear layer with zero initialization :param in_features: size of each input sample :type in_features: int :param out_features: size of each output sample :type out_features: int :param bias: whether to learn an additive bias. :type bias: bool :param logscale_factor: factor of logscale :type logscale_factor: float """ super().__init__(in_features, out_features, bias) self.logscale_factor = logscale_factor self.weight.data.zero_() self.bias.data.zero_() self.register_parameter('logs', nn.Parameter(torch.zeros(out_features)) ) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_4 = self.logs primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
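A hedged smoke test for the generated wrapper, assuming a CUDA device is available and that the LinearZeros and LinearZerosNew definitions above are in scope; it simply checks that the fused call matches eager execution on shared weights:

import torch

# Sketch: the generated call() pins buffers to cuda:0, so this needs a GPU.
if torch.cuda.is_available():
    eager = LinearZeros(4, 4).cuda()
    compiled = LinearZerosNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # weight, bias, logs all match
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x))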
Eladhi/VI_Glow
LinearZeros
false
5,122
[ "MIT" ]
1
9c48fbf8fa10c81fc2354a07fcc2837a77d06cef
https://github.com/Eladhi/VI_Glow/tree/9c48fbf8fa10c81fc2354a07fcc2837a77d06cef
import torch import torch.nn as nn class Model(nn.Linear): def __init__(self, in_features, out_features, bias=True, logscale_factor=3.0): """ Linear layer with zero initialization :param in_features: size of each input sample :type in_features: int :param out_features: size of each output sample :type out_features: int :param bias: whether to learn an additive bias. :type bias: bool :param logscale_factor: factor of logscale :type logscale_factor: float """ super().__init__(in_features, out_features, bias) self.logscale_factor = logscale_factor self.weight.data.zero_() self.bias.data.zero_() self.register_parameter('logs', nn.Parameter(torch.zeros(out_features)) ) def forward(self, x): """ Forward linear zero layer :param x: input :type x: torch.Tensor :return: output :rtype: torch.Tensor """ output = super().forward(x) output *= torch.exp(self.logs * self.logscale_factor) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
SSGC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/37/c37syvo63vdmotuxpfmllazayh63ckflzom725nffg466fmtlhph.py # Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, x_out_11, mul_10, x_out_12], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # mul_1 => mul_1 # mul_10 => mul_10 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_8 => mul_8 # mul_9 => mul_9 # x_out_1 => mul # x_out_10 => add_9 # x_out_11 => div # x_out_12 => add_10 # x_out_2 => add_1 # x_out_3 => add_2 # x_out_4 => add_3 # x_out_5 => add_4 # x_out_6 => add_5 # x_out_7 => add_6 # x_out_8 => add_7 # x_out_9 => add_8 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_1, 0.9), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 0.9), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_3, 0.9), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 0.9), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_5, 0.9), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_6, 0.9), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_7, 0.9), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_6), kwargs = {}) # %mul_7 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_8, 0.9), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_9, 0.9), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_8), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_10, 0.9), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_9), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_9, 10), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 0.1), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_10), kwargs = {}) triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr0 + (x0), xmask) tmp6 = tl.load(in_ptr1 + (x0), xmask) tmp9 = tl.load(in_ptr2 + (x0), xmask) tmp12 = tl.load(in_ptr3 + (x0), xmask) tmp15 = tl.load(in_ptr4 + (x0), xmask) tmp18 = tl.load(in_ptr5 + (x0), xmask) tmp21 = tl.load(in_ptr6 + (x0), xmask) tmp24 = tl.load(in_ptr7 + (x0), xmask) tmp27 = tl.load(in_ptr8 + (x0), xmask) tmp32 = tl.load(in_out_ptr1 + (x0), xmask) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp1 tmp14 = tmp11 
+ tmp13 tmp16 = tmp15 * tmp1 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp1 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp1 tmp23 = tmp20 + tmp22 tmp25 = tmp24 * tmp1 tmp26 = tmp23 + tmp25 tmp28 = tmp27 * tmp1 tmp29 = tmp26 + tmp28 tmp30 = 0.1 tmp31 = tmp29 * tmp30 tmp33 = tmp32 * tmp30 tmp34 = tmp31 + tmp33 tl.store(in_out_ptr1 + (x0), tmp34, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf1, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf2, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf4, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf5, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf6, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf7, out=buf8) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf8, out=buf9) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf9, out=buf11) buf10 = buf1; del buf1 # reuse buf12 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, x_out_11, mul_10, x_out_12], Original ATen: [aten.add, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_0.run(buf10, buf12, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf11, 16, grid=grid(16), stream=stream0) del buf10 del buf11 del buf2 del buf3 del buf4 del buf5 del buf6 del buf7 del buf8 del buf9 return (buf12, primals_1, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 
1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
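In closed form, the fused kernel above folds the K = 10 propagation steps into x_out = (1/K) * sum_{k=1..K} (1 - alpha) * A^k x_hat + alpha * x_hat with alpha = 0.1, which is where the 0.9 and 0.1 constants come from; x_hat = X W^T is the initial projection (buf0). A minimal eager sketch of the same accumulation:

import torch

# Sketch of the propagation the fused kernel accumulates.
K, alpha = 10, 0.1
A = torch.rand(4, 4)                 # adjacency (primals_3)
x_hat = torch.rand(4, 4)             # X @ W.T (buf0)
x, x_out = x_hat, torch.zeros_like(x_hat)
for _ in range(K):
    x = A @ x                        # the chain of extern_kernels.mm calls
    x_out += (1 - alpha) * x         # the 0.9 multiplies
x_out = x_out / K + alpha * x_hat    # the final 0.1 scale plus residual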
import torch import torch.nn as nn class SpectralEigenConv(nn.Module): def __init__(self, in_features, out_features, bias=False, K=10, alpha= 0.1, **kwargs): super().__init__() assert K > 0 self.K = K self.alpha = alpha self.in_features = in_features self.out_features = out_features self.w = nn.Linear(in_features, out_features, bias=bias) def forward(self, x, U, V=None): """ x: node attribute matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.w(x) if V is not None: V_pow = torch.ones_like(V) V_out = torch.zeros_like(V) for _ in range(self.K): V_pow *= V V_out += (1 - self.alpha) * V_pow V_out = V_out / self.K x_out = U * V_out @ (U.t() @ x) + self.alpha * x else: adj = U x_in = x x_out = torch.zeros_like(x) for _ in range(self.K): x = torch.spmm(adj, x) x_out += (1 - self.alpha) * x x_out /= self.K x_out += self.alpha * x_in return x_out def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})' ) class SSGC(nn.Module): def __init__(self, in_features, out_features, K=10, alpha=0.1, hids=[], acts=[], dropout=0.5, bias=False): super().__init__() lin = [] lin.append(nn.Dropout(dropout)) for hid, act in zip(hids, acts): lin.append(nn.Linear(in_features, hid, bias=bias)) lin.append(activations.get(act)) lin.append(nn.Dropout(dropout)) in_features = hid lin = nn.Sequential(*lin) conv = SpectralEigenConv(in_features, out_features, bias=bias, K=K, alpha=alpha) self.lin = lin self.conv = conv def forward(self, x, U, V=None): """ x: feature matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.lin(x) x = self.conv(x, U, V) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
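One caveat in the snippet above: the lin builder calls activations.get(act), which is never defined or imported here; with the default hids=[] the loop body never runs, so construction still succeeds. A self-contained sketch of that builder, using a hypothetical name-to-module mapping in place of the repo's activations helper:

import torch.nn as nn

# Hypothetical stand-in for the unresolved activations.get(act) call.
def get_activation(name):
    return {'relu': nn.ReLU(), 'elu': nn.ELU(), 'tanh': nn.Tanh()}[name]

in_features, hids, acts, dropout = 4, [16], ['relu'], 0.5  # example values
lin = [nn.Dropout(dropout)]
for hid, act in zip(hids, acts):
    lin.append(nn.Linear(in_features, hid, bias=False))
    lin.append(get_activation(act))
    lin.append(nn.Dropout(dropout))
    in_features = hid
lin = nn.Sequential(*lin)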
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr1 + x0, xmask) tmp9 = tl.load(in_ptr2 + x0, xmask) tmp12 = tl.load(in_ptr3 + x0, xmask) tmp15 = tl.load(in_ptr4 + x0, xmask) tmp18 = tl.load(in_ptr5 + x0, xmask) tmp21 = tl.load(in_ptr6 + x0, xmask) tmp24 = tl.load(in_ptr7 + x0, xmask) tmp27 = tl.load(in_ptr8 + x0, xmask) tmp32 = tl.load(in_out_ptr1 + x0, xmask) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp1 tmp14 = tmp11 + tmp13 tmp16 = tmp15 * tmp1 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp1 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp1 tmp23 = tmp20 + tmp22 tmp25 = tmp24 * tmp1 tmp26 = tmp23 + tmp25 tmp28 = tmp27 * tmp1 tmp29 = tmp26 + tmp28 tmp30 = 0.1 tmp31 = tmp29 * tmp30 tmp33 = tmp32 * tmp30 tmp34 = tmp31 + tmp33 tl.store(in_out_ptr1 + x0, tmp34, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf1, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf2, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf4, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf5, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf6, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf7, out=buf8) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf8, out=buf9) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf9, out=buf11) buf10 = buf1 del buf1 buf12 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mul_0[grid(16)](buf10, buf12, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf10 del buf11 del buf2 del buf3 del buf4 del buf5 del buf6 del buf7 del buf8 del buf9 return buf12, primals_1, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0) class 
SpectralEigenConv(nn.Module): def __init__(self, in_features, out_features, bias=False, K=10, alpha= 0.1, **kwargs): super().__init__() assert K > 0 self.K = K self.alpha = alpha self.in_features = in_features self.out_features = out_features self.w = nn.Linear(in_features, out_features, bias=bias) def forward(self, x, U, V=None): """ x: node attribute matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.w(x) if V is not None: V_pow = torch.ones_like(V) V_out = torch.zeros_like(V) for _ in range(self.K): V_pow *= V V_out += (1 - self.alpha) * V_pow V_out = V_out / self.K x_out = U * V_out @ (U.t() @ x) + self.alpha * x else: adj = U x_in = x x_out = torch.zeros_like(x) for _ in range(self.K): x = torch.spmm(adj, x) x_out += (1 - self.alpha) * x x_out /= self.K x_out += self.alpha * x_in return x_out def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})' ) class SSGCNew(nn.Module): def __init__(self, in_features, out_features, K=10, alpha=0.1, hids=[], acts=[], dropout=0.5, bias=False): super().__init__() lin = [] lin.append(nn.Dropout(dropout)) for hid, act in zip(hids, acts): lin.append(nn.Linear(in_features, hid, bias=bias)) lin.append(activations.get(act)) lin.append(nn.Dropout(dropout)) in_features = hid lin = nn.Sequential(*lin) conv = SpectralEigenConv(in_features, out_features, bias=bias, K=K, alpha=alpha) self.lin = lin self.conv = conv def forward(self, input_0, input_1): primals_1 = self.conv.w.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
EdisonLeeeee/Graphgallery
SSGC
false
5,123
[ "MIT" ]
1
8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
import torch import torch.nn as nn class SpectralEigenConv(nn.Module): def __init__(self, in_features, out_features, bias=False, K=10, alpha= 0.1, **kwargs): super().__init__() assert K > 0 self.K = K self.alpha = alpha self.in_features = in_features self.out_features = out_features self.w = nn.Linear(in_features, out_features, bias=bias) def forward(self, x, U, V=None): """ x: node attribute matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.w(x) if V is not None: V_pow = torch.ones_like(V) V_out = torch.zeros_like(V) for _ in range(self.K): V_pow *= V V_out += (1 - self.alpha) * V_pow V_out = V_out / self.K x_out = U * V_out @ (U.t() @ x) + self.alpha * x else: adj = U x_in = x x_out = torch.zeros_like(x) for _ in range(self.K): x = torch.spmm(adj, x) x_out += (1 - self.alpha) * x x_out /= self.K x_out += self.alpha * x_in return x_out def reset_parameters(self): self.w.reset_parameters() def __repr__(self): return ( f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})' ) class Model(nn.Module): def __init__(self, in_features, out_features, K=10, alpha=0.1, hids=[], acts=[], dropout=0.5, bias=False): super().__init__() lin = [] lin.append(nn.Dropout(dropout)) for hid, act in zip(hids, acts): lin.append(nn.Linear(in_features, hid, bias=bias)) lin.append(activations.get(act)) lin.append(nn.Dropout(dropout)) in_features = hid lin = nn.Sequential(*lin) conv = SpectralEigenConv(in_features, out_features, bias=bias, K=K, alpha=alpha) self.lin = lin self.conv = conv def forward(self, x, U, V=None): """ x: feature matrix if `V=None`: U: (N, N) adjacency matrix else: U: (N, k) eigenvector matrix V: (k,) eigenvalue """ x = self.lin(x) x = self.conv(x, U, V) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4]
Conv2dZeros
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zv/czvhqogqibwnm23q44xn6gzvpm2ac5f4wseb2e7zgfnmpvynwgoy.py # Topologically Sorted Source Nodes: [x, mul, exp, x_1], Original ATen: [aten.convolution, aten.mul, aten.exp] # Source node to ATen node mapping: # exp => exp # mul => mul # x => convolution # x_1 => mul_1 # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {}) triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, mul, exp, x_1], Original ATen: [aten.convolution, aten.mul, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf2, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
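The fused kernel here folds the convolution's bias add and the log-scale multiply into one pass: out = (conv(x) + b) * exp(3.0 * logs), with both the bias and the (C, 1, 1)-shaped logs indexed per channel (x1 = xindex // 16 % 4). An eager sketch of the same epilogue, assuming the 4-channel, 4x4 spatial shapes used here:

import torch
import torch.nn.functional as F

# Sketch of the fused conv + bias + exp-scale epilogue.
x = torch.rand(4, 4, 4, 4)
weight = torch.zeros(4, 4, 3, 3)   # zero-initialized, as in Conv2dZeros
bias = torch.zeros(4)
logs = torch.zeros(4, 1, 1)
y = F.conv2d(x, weight, bias, stride=1, padding=1)
y = y * torch.exp(logs * 3.0)      # (C, 1, 1) broadcasts over H, W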
import torch import torch.nn as nn class ActNorm(nn.Module): def __init__(self, num_channels, scale=1.0, logscale_factor=3.0, batch_variance=False): """ Activation normalization layer :param num_channels: number of channels :type num_channels: int :param scale: scale :type scale: float :param logscale_factor: factor for logscale :type logscale_factor: float :param batch_variance: use batch variance :type batch_variance: bool """ super().__init__() self.num_channels = num_channels self.scale = scale self.logscale_factor = logscale_factor self.batch_variance = batch_variance self.bias_inited = False self.logs_inited = False self.register_parameter('bias', nn.Parameter(torch.zeros(1, self. num_channels, 1, 1))) self.register_parameter('logs', nn.Parameter(torch.zeros(1, self. num_channels, 1, 1))) def actnorm_center(self, x, reverse=False): """ center operation of activation normalization :param x: input :type x: torch.Tensor :param reverse: whether to reverse bias :type reverse: bool :return: centered input :rtype: torch.Tensor """ if not self.bias_inited: self.initialize_bias(x) if not reverse: return x + self.bias else: return x - self.bias def actnorm_scale(self, x, logdet, reverse=False): """ scale operation of activation normalization :param x: input :type x: torch.Tensor :param logdet: log determinant :type logdet: :param reverse: whether to reverse bias :type reverse: bool :return: centered input and logdet :rtype: tuple(torch.Tensor, torch.Tensor) """ if not self.logs_inited: self.initialize_logs(x) logs = self.logs * self.logscale_factor if not reverse: x *= torch.exp(logs) else: x *= torch.exp(-logs) if logdet is not None: logdet_factor = ops.count_pixels(x) dlogdet = torch.sum(logs) * logdet_factor if reverse: dlogdet *= -1 logdet += dlogdet return x, logdet def initialize_bias(self, x): """ Initialize bias :param x: input :type x: torch.Tensor """ if not self.training: return with torch.no_grad(): x_mean = -1.0 * ops.reduce_mean(x, dim=[0, 2, 3], keepdim=True) self.bias.data.copy_(x_mean.data) self.bias_inited = True def initialize_logs(self, x): """ Initialize logs :param x: input :type x: torch.Tensor """ if not self.training: return with torch.no_grad(): if self.batch_variance: x_var = ops.reduce_mean(x ** 2, keepdim=True) else: x_var = ops.reduce_mean(x ** 2, dim=[0, 2, 3], keepdim=True) logs = torch.log(self.scale / (torch.sqrt(x_var) + 1e-06) ) / self.logscale_factor self.logs.data.copy_(logs.data) self.logs_inited = True def forward(self, x, logdet=None, reverse=False): """ Forward activation normalization layer :param x: input :type x: torch.Tensor :param logdet: log determinant :type logdet: :param reverse: whether to reverse bias :type reverse: bool :return: normalized input and logdet :rtype: tuple(torch.Tensor, torch.Tensor) """ assert len(x.shape) == 4 assert x.shape[1 ] == self.num_channels, 'Input shape should be NxCxHxW, however channels are {} instead of {}'.format( x.shape[1], self.num_channels) assert x.device == self.bias.device and x.device == self.logs.device, 'Expect input device {} instead of {}'.format( self.bias.device, x.device) if not reverse: x = self.actnorm_center(x, reverse=False) x, logdet = self.actnorm_scale(x, logdet, reverse=False) else: x, logdet = self.actnorm_scale(x, logdet, reverse=True) x = self.actnorm_center(x, reverse=True) return x, logdet class Conv2d(nn.Conv2d): @staticmethod def get_padding(padding_type, kernel_size, stride): """ Get padding size. 
mentioned in https://github.com/pytorch/pytorch/issues/3867#issuecomment-361775080 behaves as 'SAME' padding in TensorFlow independent on input size when stride is 1 :param padding_type: type of padding in ['SAME', 'VALID'] :type padding_type: str :param kernel_size: kernel size :type kernel_size: tuple(int) or int :param stride: stride :type stride: int :return: padding size :rtype: tuple(int) """ assert padding_type in ['SAME', 'VALID' ], 'Unsupported padding type: {}'.format(padding_type) if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if padding_type == 'SAME': assert stride == 1, "'SAME' padding only supports stride=1" return tuple((k - 1) // 2 for k in kernel_size) return tuple(0 for _ in kernel_size) def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=1, padding_type='SAME', do_weightnorm=False, do_actnorm=True, dilation=1, groups=1): """ Wrapper of nn.Conv2d with weight normalization and activation normalization :param padding_type: type of padding in ['SAME', 'VALID'] :type padding_type: str :param do_weightnorm: whether to do weight normalization after convolution :type do_weightnorm: bool :param do_actnorm: whether to do activation normalization after convolution :type do_actnorm: bool """ padding = self.get_padding(padding_type, kernel_size, stride) super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=not do_actnorm) self.do_weight_norm = do_weightnorm self.do_actnorm = do_actnorm self.weight.data.normal_(mean=0.0, std=0.05) if self.do_actnorm: self.actnorm = ActNorm(out_channels) else: self.bias.data.zero_() def forward(self, x): """ Forward wrapped Conv2d layer :param x: input :type x: torch.Tensor :return: output :rtype: torch.Tensor """ x = super().forward(x) if self.do_actnorm: x, _ = self.actnorm(x) return x class Conv2dZeros(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=1, padding_type='SAME', logscale_factor=3, dilation=1, groups=1, bias=True): """ Wrapper of nn.Conv2d with zero initialization and logs :param padding_type: type of padding in ['SAME', 'VALID'] :type padding_type: str :param logscale_factor: factor for logscale :type logscale_factor: float """ padding = Conv2d.get_padding(padding_type, kernel_size, stride) super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.logscale_factor = logscale_factor self.bias.data.zero_() self.weight.data.zero_() self.register_parameter('logs', nn.Parameter(torch.zeros( out_channels, 1, 1))) def forward(self, x): """ Forward wrapped Conv2d layer :param x: input :type x: torch.Tensor :return: output :rtype: torch.Tensor """ x = super().forward(x) x *= torch.exp(self.logs * self.logscale_factor) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
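As a quick check of the padding helper defined above, 'SAME' with a 3x3 kernel and stride 1 yields (1, 1), which matches the padding=(1, 1) passed to extern_kernels.convolution in the generated call:

# Sketch, assuming the Conv2d class from the snippet above is in scope.
assert Conv2d.get_padding('SAME', (3, 3), 1) == (1, 1)
assert Conv2d.get_padding('VALID', (3, 3), 2) == (0, 0)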
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 class ActNorm(nn.Module): def __init__(self, num_channels, scale=1.0, logscale_factor=3.0, batch_variance=False): """ Activation normalization layer :param num_channels: number of channels :type num_channels: int :param scale: scale :type scale: float :param logscale_factor: factor for logscale :type logscale_factor: float :param batch_variance: use batch variance :type batch_variance: bool """ super().__init__() self.num_channels = num_channels self.scale = scale self.logscale_factor = logscale_factor self.batch_variance = batch_variance self.bias_inited = False self.logs_inited = False self.register_parameter('bias', nn.Parameter(torch.zeros(1, self. num_channels, 1, 1))) self.register_parameter('logs', nn.Parameter(torch.zeros(1, self. 
num_channels, 1, 1))) def actnorm_center(self, x, reverse=False): """ center operation of activation normalization :param x: input :type x: torch.Tensor :param reverse: whether to reverse bias :type reverse: bool :return: centered input :rtype: torch.Tensor """ if not self.bias_inited: self.initialize_bias(x) if not reverse: return x + self.bias else: return x - self.bias def actnorm_scale(self, x, logdet, reverse=False): """ scale operation of activation normalization :param x: input :type x: torch.Tensor :param logdet: log determinant :type logdet: :param reverse: whether to reverse bias :type reverse: bool :return: centered input and logdet :rtype: tuple(torch.Tensor, torch.Tensor) """ if not self.logs_inited: self.initialize_logs(x) logs = self.logs * self.logscale_factor if not reverse: x *= torch.exp(logs) else: x *= torch.exp(-logs) if logdet is not None: logdet_factor = ops.count_pixels(x) dlogdet = torch.sum(logs) * logdet_factor if reverse: dlogdet *= -1 logdet += dlogdet return x, logdet def initialize_bias(self, x): """ Initialize bias :param x: input :type x: torch.Tensor """ if not self.training: return with torch.no_grad(): x_mean = -1.0 * ops.reduce_mean(x, dim=[0, 2, 3], keepdim=True) self.bias.data.copy_(x_mean.data) self.bias_inited = True def initialize_logs(self, x): """ Initialize logs :param x: input :type x: torch.Tensor """ if not self.training: return with torch.no_grad(): if self.batch_variance: x_var = ops.reduce_mean(x ** 2, keepdim=True) else: x_var = ops.reduce_mean(x ** 2, dim=[0, 2, 3], keepdim=True) logs = torch.log(self.scale / (torch.sqrt(x_var) + 1e-06) ) / self.logscale_factor self.logs.data.copy_(logs.data) self.logs_inited = True def forward(self, x, logdet=None, reverse=False): """ Forward activation normalization layer :param x: input :type x: torch.Tensor :param logdet: log determinant :type logdet: :param reverse: whether to reverse bias :type reverse: bool :return: normalized input and logdet :rtype: tuple(torch.Tensor, torch.Tensor) """ assert len(x.shape) == 4 assert x.shape[1 ] == self.num_channels, 'Input shape should be NxCxHxW, however channels are {} instead of {}'.format( x.shape[1], self.num_channels) assert x.device == self.bias.device and x.device == self.logs.device, 'Expect input device {} instead of {}'.format( self.bias.device, x.device) if not reverse: x = self.actnorm_center(x, reverse=False) x, logdet = self.actnorm_scale(x, logdet, reverse=False) else: x, logdet = self.actnorm_scale(x, logdet, reverse=True) x = self.actnorm_center(x, reverse=True) return x, logdet class Conv2d(nn.Conv2d): @staticmethod def get_padding(padding_type, kernel_size, stride): """ Get padding size. 
mentioned in https://github.com/pytorch/pytorch/issues/3867#issuecomment-361775080 behaves as 'SAME' padding in TensorFlow independent on input size when stride is 1 :param padding_type: type of padding in ['SAME', 'VALID'] :type padding_type: str :param kernel_size: kernel size :type kernel_size: tuple(int) or int :param stride: stride :type stride: int :return: padding size :rtype: tuple(int) """ assert padding_type in ['SAME', 'VALID' ], 'Unsupported padding type: {}'.format(padding_type) if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if padding_type == 'SAME': assert stride == 1, "'SAME' padding only supports stride=1" return tuple((k - 1) // 2 for k in kernel_size) return tuple(0 for _ in kernel_size) def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=1, padding_type='SAME', do_weightnorm=False, do_actnorm=True, dilation=1, groups=1): """ Wrapper of nn.Conv2d with weight normalization and activation normalization :param padding_type: type of padding in ['SAME', 'VALID'] :type padding_type: str :param do_weightnorm: whether to do weight normalization after convolution :type do_weightnorm: bool :param do_actnorm: whether to do activation normalization after convolution :type do_actnorm: bool """ padding = self.get_padding(padding_type, kernel_size, stride) super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=not do_actnorm) self.do_weight_norm = do_weightnorm self.do_actnorm = do_actnorm self.weight.data.normal_(mean=0.0, std=0.05) if self.do_actnorm: self.actnorm = ActNorm(out_channels) else: self.bias.data.zero_() def forward(self, x): """ Forward wrapped Conv2d layer :param x: input :type x: torch.Tensor :return: output :rtype: torch.Tensor """ x = super().forward(x) if self.do_actnorm: x, _ = self.actnorm(x) return x class Conv2dZerosNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=1, padding_type='SAME', logscale_factor=3, dilation=1, groups=1, bias=True): """ Wrapper of nn.Conv2d with zero initialization and logs :param padding_type: type of padding in ['SAME', 'VALID'] :type padding_type: str :param logscale_factor: factor for logscale :type logscale_factor: float """ padding = Conv2d.get_padding(padding_type, kernel_size, stride) super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.logscale_factor = logscale_factor self.bias.data.zero_() self.weight.data.zero_() self.register_parameter('logs', nn.Parameter(torch.zeros( out_channels, 1, 1))) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_4 = self.logs primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
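As with the activations helper earlier, ActNorm references ops.reduce_mean and ops.count_pixels without importing them; they are only reached during data-dependent initialization and logdet bookkeeping. Minimal stand-ins with the semantics the call sites suggest (hypothetical, not the repo's ops module):

import torch

# Hypothetical stand-ins for the unresolved ops.* helpers used by ActNorm.
def reduce_mean(x, dim=None, keepdim=False):
    if dim is None:
        dim = list(range(x.dim()))
    return x.mean(dim=dim, keepdim=keepdim)

def count_pixels(x):
    return x.shape[2] * x.shape[3]   # H * W for an NxCxHxW tensor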
Eladhi/VI_Glow
Conv2dZeros
false
5,124
[ "MIT" ]
1
9c48fbf8fa10c81fc2354a07fcc2837a77d06cef
https://github.com/Eladhi/VI_Glow/tree/9c48fbf8fa10c81fc2354a07fcc2837a77d06cef
import torch import torch.nn as nn class ActNorm(nn.Module): def __init__(self, num_channels, scale=1.0, logscale_factor=3.0, batch_variance=False): """ Activation normalization layer :param num_channels: number of channels :type num_channels: int :param scale: scale :type scale: float :param logscale_factor: factor for logscale :type logscale_factor: float :param batch_variance: use batch variance :type batch_variance: bool """ super().__init__() self.num_channels = num_channels self.scale = scale self.logscale_factor = logscale_factor self.batch_variance = batch_variance self.bias_inited = False self.logs_inited = False self.register_parameter('bias', nn.Parameter(torch.zeros(1, self. num_channels, 1, 1))) self.register_parameter('logs', nn.Parameter(torch.zeros(1, self. num_channels, 1, 1))) def actnorm_center(self, x, reverse=False): """ center operation of activation normalization :param x: input :type x: torch.Tensor :param reverse: whether to reverse bias :type reverse: bool :return: centered input :rtype: torch.Tensor """ if not self.bias_inited: self.initialize_bias(x) if not reverse: return x + self.bias else: return x - self.bias def actnorm_scale(self, x, logdet, reverse=False): """ scale operation of activation normalization :param x: input :type x: torch.Tensor :param logdet: log determinant :type logdet: :param reverse: whether to reverse bias :type reverse: bool :return: centered input and logdet :rtype: tuple(torch.Tensor, torch.Tensor) """ if not self.logs_inited: self.initialize_logs(x) logs = self.logs * self.logscale_factor if not reverse: x *= torch.exp(logs) else: x *= torch.exp(-logs) if logdet is not None: logdet_factor = ops.count_pixels(x) dlogdet = torch.sum(logs) * logdet_factor if reverse: dlogdet *= -1 logdet += dlogdet return x, logdet def initialize_bias(self, x): """ Initialize bias :param x: input :type x: torch.Tensor """ if not self.training: return with torch.no_grad(): x_mean = -1.0 * ops.reduce_mean(x, dim=[0, 2, 3], keepdim=True) self.bias.data.copy_(x_mean.data) self.bias_inited = True def initialize_logs(self, x): """ Initialize logs :param x: input :type x: torch.Tensor """ if not self.training: return with torch.no_grad(): if self.batch_variance: x_var = ops.reduce_mean(x ** 2, keepdim=True) else: x_var = ops.reduce_mean(x ** 2, dim=[0, 2, 3], keepdim=True) logs = torch.log(self.scale / (torch.sqrt(x_var) + 1e-06) ) / self.logscale_factor self.logs.data.copy_(logs.data) self.logs_inited = True def forward(self, x, logdet=None, reverse=False): """ Forward activation normalization layer :param x: input :type x: torch.Tensor :param logdet: log determinant :type logdet: :param reverse: whether to reverse bias :type reverse: bool :return: normalized input and logdet :rtype: tuple(torch.Tensor, torch.Tensor) """ assert len(x.shape) == 4 assert x.shape[1 ] == self.num_channels, 'Input shape should be NxCxHxW, however channels are {} instead of {}'.format( x.shape[1], self.num_channels) assert x. # ... truncated (>4000 chars) for memory efficiency
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7b/c7bsbu5nqjwno7oolhruidochm2rierdki7fkzahol2dvs7dgv5t.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/gm/cgmflgdlpeeb52xctoa47uvw47ycyf7ahlj5wdscxdatpbwcboco.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/5q/c5qmnkuxezgezseizmolw3mx24fyy6xp3cfoz3egpqwcprxgwjre.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_1 
del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf6, primals_7, primals_3, 256, grid=grid(256), stream=stream0) del primals_7 return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, buf7, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.distributed


class PositionwiseFeedForward(nn.Module):
    """ A two-layer Feed-Forward-Network with residual layer norm.

    Args:
        d_model (int): the size of input for the first-layer of the FFN.
        d_ff (int): the hidden layer size of the second-layer of the FFN.
        dropout (float): dropout probability (0-1.0).
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        """Layer definition.

        Args:
            x: [batch_size, input_len, model_dim]

        Returns:
            output: [batch_size, input_len, model_dim]
        """
        inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        output = self.dropout_2(self.w_2(inter))
        return output + x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_ff': 4}]
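For reference, a minimal usage sketch of the eager module above, matching the record's get_inputs()/get_init_inputs(); the variable names and the eval() call are illustrative, not part of the original record:

import torch

ffn = PositionwiseFeedForward(d_model=4, d_ff=4, dropout=0.1)
ffn.eval()  # disable dropout so the forward pass is deterministic
x = torch.rand(4, 4, 4, 4)  # same shape as get_inputs()
out = ffn(x)
assert out.shape == x.shape  # the residual connection preserves the shape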
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-06
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2,
        (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
            buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
            buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del buf1
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
            primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf4, (64, 4), (4, 1), 0
        ), primals_6, buf7, primals_4


class PositionwiseFeedForwardNew(nn.Module):
    """ A two-layer Feed-Forward-Network with residual layer norm.

    Args:
        d_model (int): the size of input for the first-layer of the FFN.
        d_ff (int): the hidden layer size of the second-layer of the FFN.
        dropout (float): dropout probability (0-1.0).
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForwardNew, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, input_0):
        # Map each parameter to the position the compiled graph expects:
        # primals_1/primals_2 are the scale and shift of the fused
        # layer-norm kernel, primals_5 and primals_7 are the biases added
        # after the first and second matmul respectively.
        primals_1 = self.layer_norm.weight
        primals_2 = self.layer_norm.bias
        primals_4 = self.w_1.weight
        primals_5 = self.w_1.bias
        primals_6 = self.w_2.weight
        primals_7 = self.w_2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
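To sanity-check the compiled wrapper above against the eager module, a sketch under stated assumptions: a CUDA device is available, both modules share one state_dict, and both run in eval() mode (the compiled graph contains no dropout ops, so only the inference path can match). The tolerance is illustrative:

import torch

eager = PositionwiseFeedForward(d_model=4, d_ff=4).cuda().eval()
fused = PositionwiseFeedForwardNew(d_model=4, d_ff=4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)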
Eldriann/Master-thesis
PositionwiseFeedForward
false
5,125
[ "MIT" ]
1
9d09d97f4002cc9fc730f10317614e1d0d307353
https://github.com/Eldriann/Master-thesis/tree/9d09d97f4002cc9fc730f10317614e1d0d307353
import torch
import torch.nn as nn
import torch.distributed


class Model(nn.Module):
    """ A two-layer Feed-Forward-Network with residual layer norm.

    Args:
        d_model (int): the size of input for the first-layer of the FFN.
        d_ff (int): the hidden layer size of the second-layer of the FFN.
        dropout (float): dropout probability (0-1.0).
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        """Layer definition.

        Args:
            x: [batch_size, input_len, model_dim]

        Returns:
            output: [batch_size, input_len, model_dim]
        """
        inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        output = self.dropout_2(self.w_2(inter))
        return output + x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
BartClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.tanh] # Source node to ATen node mapping: # hidden_states_2 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) 
''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.checkpoint


class BartClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, input_dim: 'int', inner_dim: 'int', num_classes:
            'int', pooler_dropout: 'float'):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, hidden_states: 'torch.Tensor'):
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'inner_dim': 4, 'num_classes': 4,
        'pooler_dropout': 0.5}]
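A minimal usage sketch for the head above; eval() matters here because pooler_dropout=0.5 would otherwise zero half the activations at random (the variable names are illustrative):

import torch

head = BartClassificationHead(input_dim=4, inner_dim=4, num_classes=4,
    pooler_dropout=0.5)
head.eval()
logits = head(torch.rand(4, 4, 4, 4))  # same shape as get_inputs()
assert logits.shape == (4, 4, 4, 4)  # both Linears act on the last dim only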
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class BartClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: 'int', inner_dim: 'int', num_classes: 'int', pooler_dropout: 'float'): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
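As with the previous record, a hedged equivalence sketch for the compiled head (assumes a CUDA device; with pooler_dropout=0.5 the comparison is only meaningful in eval() mode, which is the path the compiled call() encodes, since it contains no dropout ops):

import torch

eager = BartClassificationHead(4, 4, 4, pooler_dropout=0.5).cuda().eval()
fused = BartClassificationHeadNew(4, 4, 4, pooler_dropout=0.5).cuda().eval()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)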
Elvisambition/bert_seq2seq
BartClassificationHead
false
5,126
[ "Apache-2.0" ]
1
643ac537c16872f0d13200de06001d8201a54fbb
https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb
import torch
import torch.nn as nn
import torch.utils.checkpoint


class Model(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, input_dim: 'int', inner_dim: 'int', num_classes:
            'int', pooler_dropout: 'float'):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, hidden_states: 'torch.Tensor'):
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'inner_dim': 4, 'num_classes': 4,
        'pooler_dropout': 0.5}]
DummyModelWithSharedSubmodule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf4, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class DummyDenseWithRelu(nn.Module):

    def __init__(self, input_size, output_size, relu=None):
        super(DummyDenseWithRelu, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.relu = relu or nn.ReLU()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.relu(self.linear(x))


class DummyModelWithSharedSubmodule(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super(DummyModelWithSharedSubmodule, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dense1 = DummyDenseWithRelu(input_size, hidden_size)
        self.dense2 = DummyDenseWithRelu(hidden_size, output_size,
            self.dense1.relu)

    def forward(self, x):
        x = self.dense1(x)
        x = self.dense2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
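A short sketch of the property this dummy model exists to exercise: both dense blocks hold a reference to one and the same ReLU instance (the assertions are illustrative):

import torch

model = DummyModelWithSharedSubmodule(input_size=4, hidden_size=4,
    output_size=4)
assert model.dense1.relu is model.dense2.relu  # the submodule is shared
out = model(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)
assert (out >= 0).all()  # the final activation is a ReLU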
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5 class DummyDenseWithRelu(nn.Module): def __init__(self, input_size, output_size, relu=None): super(DummyDenseWithRelu, self).__init__() self.input_size = input_size self.output_size = output_size self.relu = relu or nn.ReLU() self.linear = nn.Linear(input_size, output_size) def forward(self, x): return self.relu(self.linear(x)) class DummyModelWithSharedSubmoduleNew(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(DummyModelWithSharedSubmoduleNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.dense1 = DummyDenseWithRelu(input_size, hidden_size) self.dense2 = DummyDenseWithRelu(hidden_size, output_size, self. 
dense1.relu) def forward(self, input_0): primals_1 = self.dense1.linear.weight primals_2 = self.dense1.linear.bias primals_4 = self.dense2.linear.weight primals_5 = self.dense2.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Emily0219/distiller
DummyModelWithSharedSubmodule
false
5,127
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class DummyDenseWithRelu(nn.Module):

    def __init__(self, input_size, output_size, relu=None):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.relu = relu or nn.ReLU()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.relu(self.linear(x))


class Model(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dense1 = DummyDenseWithRelu(input_size, hidden_size)
        self.dense2 = DummyDenseWithRelu(hidden_size, output_size,
            self.dense1.relu)

    def forward(self, x):
        x = self.dense1(x)
        x = self.dense2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 4]
DummyDenseWithRelu
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class DummyDenseWithRelu(nn.Module):

    def __init__(self, input_size, output_size, relu=None):
        super(DummyDenseWithRelu, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.relu = relu or nn.ReLU()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.relu(self.linear(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4}]
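Because the constructor uses `relu or nn.ReLU()`, an existing activation module can be threaded through several layers; a small illustrative sketch (the layer names are not from the record):

import torch
import torch.nn as nn

shared = nn.ReLU()
layer_a = DummyDenseWithRelu(4, 4, relu=shared)
layer_b = DummyDenseWithRelu(4, 4, relu=shared)
assert layer_a.relu is layer_b.relu  # one activation module, two users
y = layer_b(layer_a(torch.rand(4, 4, 4, 4)))
assert y.shape == (4, 4, 4, 4)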
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class DummyDenseWithReluNew(nn.Module): def __init__(self, input_size, output_size, relu=None): super(DummyDenseWithReluNew, self).__init__() self.input_size = input_size self.output_size = output_size self.relu = relu or nn.ReLU() self.linear = nn.Linear(input_size, output_size) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Emily0219/distiller
DummyDenseWithRelu
false
5,128
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class Model(nn.Module):

    def __init__(self, input_size, output_size, relu=None):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.relu = relu or nn.ReLU()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.relu(self.linear(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vu/cvuv73ijvl45rys2kvk2dud7shg4nznn622fzyldpf7pmxppx3o5.py # Topologically Sorted Source Nodes: [proj], Original ATen: [aten.tanh] # Source node to ATen node mapping: # proj => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = 
async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7f/c7fckd4hiui2pmwnc5cy44b665px4tvaslalamounqk7duyqubll.py # Topologically Sorted Source Nodes: [mul, context], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # context => sum_2 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %primals_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-2]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask) tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [proj], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul, context], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(buf4, primals_1, buf5, 16, grid=grid(16), stream=stream0) return (buf5, buf4, primals_1, buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), 
device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.distributed class SelfAttention(nn.Module): def __init__(self, model_dim, dropout=0.1): super(SelfAttention, self).__init__() self.Va = nn.Linear(model_dim, 1, bias=False) self.Wa = nn.Linear(model_dim, model_dim) self.dropout = nn.Dropout(dropout) def forward(self, x, mask=None): b, t, n = x.size() proj = torch.tanh(self.Wa(x.view(b * t, n).contiguous())) scores = self.Va(proj) scores = scores.view(b, t).contiguous() if mask is not None: scores = scores.masked_fill(mask, -1e+18) attn = torch.softmax(scores, -1) drop_attn = self.dropout(attn) context = torch.sum(drop_attn.unsqueeze(-1) * x, -2) return context, attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 1), (1, 4 ), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused_mul_sum_3[grid(16)](buf4, primals_1, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf5, buf4, primals_1, buf1, buf4, primals_4 class SelfAttentionNew(nn.Module): def __init__(self, model_dim, dropout=0.1): super(SelfAttentionNew, self).__init__() self.Va = nn.Linear(model_dim, 1, bias=False) self.Wa = nn.Linear(model_dim, model_dim) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_4 = self.Va.weight primals_2 = self.Wa.weight primals_3 = self.Wa.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
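# --- Reviewer note (not part of the original dataset row) ---
# A hypothetical CUDA-only cross-check: it assumes a CUDA device is available and
# reuses the SelfAttention and SelfAttentionNew classes defined in this record,
# loading identical weights into both so the eager and compiled paths can be
# compared numerically. The compiled path omits dropout, so eval() is used.
import torch

torch.manual_seed(0)
eager = SelfAttention(model_dim=4).cuda().eval()        # eager reference (dropout disabled)
compiled = SelfAttentionNew(model_dim=4).cuda().eval()  # inductor-generated path
compiled.load_state_dict(eager.state_dict())            # share the same parameters
x = torch.rand(4, 4, 4, device='cuda')                  # (batch, time, dim) as in get_inputs()
ctx_e, attn_e = eager(x)
ctx_c, attn_c = compiled(x)
assert torch.allclose(ctx_e, ctx_c, atol=1e-5)          # fused kernels may differ by rounding
assert torch.allclose(attn_e, attn_c, atol=1e-5)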
Eldriann/Master-thesis
SelfAttention
false
5129
[ "MIT" ]
1
9d09d97f4002cc9fc730f10317614e1d0d307353
https://github.com/Eldriann/Master-thesis/tree/9d09d97f4002cc9fc730f10317614e1d0d307353
import torch import torch.nn as nn import torch.distributed class Model(nn.Module): def __init__(self, model_dim, dropout=0.1): super().__init__() self.Va = nn.Linear(model_dim, 1, bias=False) self.Wa = nn.Linear(model_dim, model_dim) self.dropout = nn.Dropout(dropout) def forward(self, x, mask=None): b, t, n = x.size() proj = torch.tanh(self.Wa(x.view(b * t, n).contiguous())) scores = self.Va(proj) scores = scores.view(b, t).contiguous() if mask is not None: scores = scores.masked_fill(mask, -1e+18) attn = torch.softmax(scores, -1) drop_attn = self.dropout(attn) context = torch.sum(drop_attn.unsqueeze(-1) * x, -2) return context, attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
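# --- Reviewer note (not part of the original dataset row) ---
# A minimal, hypothetical smoke test for this record's eager module; it assumes
# only the Model class from pytorch_code above and runs on CPU, whereas the
# compiled call() path additionally needs CUDA.
import torch

model = Model(model_dim=4)
model.eval()                         # disable dropout so context is the exact attn-weighted sum
x = torch.rand(4, 4, 4)              # (batch, time, dim), matching get_inputs()
context, attn = model(x)
assert context.shape == (4, 4)       # one pooled vector per batch item
assert attn.shape == (4, 4)          # one weight per time step
assert torch.allclose(attn.sum(-1), torch.ones(4))  # softmax rows sum to 1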
ContentLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ty/ctyujcegcomjiz2en5pnivkmeu7rrapnbfwd4wg7gb3ywjz4ri7x.py # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, mul], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] # Source node to ATen node mapping: # mul => mul # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) triton_per_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, mul], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ContentLoss(nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return 0.5 * torch.sum((x - y) ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class ContentLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Enigmatisms/NeuralStyle
ContentLoss
false
5130
[ "Apache-2.0" ]
1
27b435b5c51b41427e9f465793a0b81ad7248ab8
https://github.com/Enigmatisms/NeuralStyle/tree/27b435b5c51b41427e9f465793a0b81ad7248ab8
import torch from torch import nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return 0.5 * torch.sum((x - y) ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
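# --- Reviewer note (not part of the original dataset row) ---
# A hypothetical identity check for this record's eager loss: 0.5 * sum((x - y)**2)
# equals half of the sum-reduced MSE, giving a convenient CPU-only cross-check
# against torch.nn.functional.
import torch
import torch.nn.functional as F

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = Model()(x, y)                                   # eager ContentLoss
assert torch.allclose(loss, 0.5 * F.mse_loss(x, y, reduction='sum'))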
BahdanauAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iq/ciqftckmgqckpktocc4vnzvzxloichfo465idr3x42oonjq74tp7.py # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) 
tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [alignment], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) return (reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class BahdanauAttention(nn.Module): def __init__(self, dim): super(BahdanauAttention, self).__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, query, processed_memory): """ Args: query: (batch, 1, dim) or (batch, dim) processed_memory: (batch, max_time, dim) """ if query.dim() == 2: query = query.unsqueeze(1) processed_query = self.query_layer(query) alignment = self.v(self.tanh(processed_query + processed_memory)) return alignment.squeeze(-1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class BahdanauAttentionNew(nn.Module): def __init__(self, dim): super(BahdanauAttentionNew, self).__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, input_0, input_1): primals_2 = self.query_layer.weight primals_4 = self.v.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Emotional-Text-to-Speech/tacotron_pytorch
BahdanauAttention
false
5131
[ "MIT" ]
1
e6b1a3907afb01fe31bcbd77c677667adf6733f5
https://github.com/Emotional-Text-to-Speech/tacotron_pytorch/tree/e6b1a3907afb01fe31bcbd77c677667adf6733f5
import torch from torch import nn class Model(nn.Module): def __init__(self, dim): super().__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, query, processed_memory): """ Args: query: (batch, 1, dim) or (batch, dim) processed_memory: (batch, max_time, dim) """ if query.dim() == 2: query = query.unsqueeze(1) processed_query = self.query_layer(query) alignment = self.v(self.tanh(processed_query + processed_memory)) return alignment.squeeze(-1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
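# --- Reviewer note (not part of the original dataset row) ---
# A hypothetical shape check for this record's eager additive attention: the
# score is v^T tanh(W q + m), one scalar per memory position, so a (batch, dim)
# query against a (batch, max_time, dim) memory yields (batch, max_time)
# alignments. CPU-only, using the Model class from pytorch_code above.
import torch

attn = Model(dim=4)
query = torch.rand(4, 4)          # (batch, dim); forward unsqueezes it to (batch, 1, dim)
memory = torch.rand(4, 7, 4)      # (batch, max_time, dim), assumed already projected
alignment = attn(query, memory)   # (batch, 1, dim) broadcasts against (batch, max_time, dim)
assert alignment.shape == (4, 7)  # one unnormalized score per time step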
HILL
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ua/cuaa7l3gtv3zkrpmk7p4sxltibskglorn7ravupkeqnbzrnz7242.py # Topologically Sorted Source Nodes: [t1], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # t1 => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad2d_0 = async_compile.triton('triton_poi_fused_replication_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = (xindex // 10) % 10 x2 = (xindex // 100) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-3) + x1)) + ((-3) + x1) * (((-3) + x1) > (0))))) + (((0) * ((0) >= ((-3) + x1)) + ((-3) + x1) * (((-3) + x1) > (0)))) * ((((0) * ((0) >= ((-3) + x1)) + ((-3) + x1) * (((-3) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-3) + x0)) + ((-3) + x0) * (((-3) + x0) > (0))))) + (((0) * ((0) >= ((-3) + x0)) + ((-3) + x0) * (((-3) + x0) > (0)))) * ((((0) * ((0) >= ((-3) + x0)) + ((-3) + x0) * (((-3) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/p3/cp3tylgyuoamtlltnr7pojny2vbnvfxvhg6zp7yqctp7cuailjpi.py # Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs] # Source node to ATen node mapping: # abs_1 => abs_1 # Graph fragment: # %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%convolution,), kwargs = {}) triton_poi_fused_abs_1 = async_compile.triton('triton_poi_fused_abs_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.abs(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ze/czeo7t5ml7ntyljlxxqp7vnonbmgzmbzlqpv236dlsdhpki26mfk.py # Topologically Sorted Source Nodes: [t3], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # t3 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%abs_1, [3, 3], [1, 1], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_2 = async_compile.triton('triton_poi_fused_avg_pool2d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 10) % 10 x0 = xindex % 10 x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-11) + x4), tmp10 & xmask, other=0.0) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-10) + x4), tmp16 & xmask, other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-9) + x4), tmp23 & xmask, other=0.0) tmp25 = tmp24 + tmp18 tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-1) + x4), tmp30 & xmask, other=0.0) tmp32 = tmp31 + tmp25 tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x4), tmp33 & xmask, other=0.0) tmp35 = tmp34 + tmp32 tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=0.0) tmp38 = tmp37 + tmp35 tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (9 + x4), tmp43 & xmask, other=0.0) tmp45 = tmp44 + tmp38 tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (10 + x4), tmp46 & xmask, other=0.0) tmp48 = tmp47 + tmp45 tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (11 + x4), tmp49 & xmask, other=0.0) tmp51 = tmp50 + tmp48 tmp52 = 1 + ((-1)*x0) + ((-1)*x1) + (x0*x1) + (((11) * ((11) <= (2 + x0)) + (2 + x0) * ((2 + x0) < (11)))*((11) * ((11) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (11)))) + ((-1)*x0*((11) * ((11) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (11)))) + ((-1)*x1*((11) * ((11) <= (2 + x0)) + (2 + x0) * ((2 + x0) < (11)))) + ((11) * ((11) <= (2 + x0)) + (2 + x0) * ((2 + x0) < (11))) + ((11) * ((11) <= (2 + x1)) + (2 + x1) * ((2 + x1) < (11))) tmp53 = tmp51 / tmp52 tl.store(out_ptr0 + (x4), tmp53, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/f5/cf5eo7ulbhg34r4prncpvogqmo65maeg63tlcfqg5e4v67wox6ll.py # Topologically Sorted Source Nodes: [add, t4], Original ATen: [aten.add, aten.reciprocal, aten.mul] # Source node to ATen node mapping: # add => add # t4 => mul, reciprocal # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, 1e-10), kwargs = {}) # %reciprocal : [num_users=2] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, %reciprocal), kwargs = {}) triton_poi_fused_add_mul_reciprocal_3 = async_compile.triton('triton_poi_fused_add_mul_reciprocal_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_reciprocal_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_reciprocal_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (33 + x0 + (10*x1) + (100*x2)), xmask) tmp1 = 1e-10 tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp2 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp7 = tmp4 * tmp4 tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4f/c4fjhc5lwnqcd46ozbrfxuh5vqfkh5k26in5tu3l6wgl7qeh7nit.py # Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # pad_1 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mul, [None, None, %clamp_max_2, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %clamp_max_2]), kwargs = {}) triton_poi_fused_replication_pad2d_4 = 
async_compile.triton('triton_poi_fused_replication_pad2d_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = (xindex // 18) % 18 x2 = (xindex // 324) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-7) + x1)) + ((-7) + x1) * (((-7) + x1) > (0))))) + (((0) * ((0) >= ((-7) + x1)) + ((-7) + x1) * (((-7) + x1) > (0)))) * ((((0) * ((0) >= ((-7) + x1)) + ((-7) + x1) * (((-7) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-7) + x0)) + ((-7) + x0) * (((-7) + x0) > (0))))) + (((0) * ((0) >= ((-7) + x0)) + ((-7) + x0) * (((-7) + x0) > (0)))) * ((((0) * ((0) >= ((-7) + x0)) + ((-7) + x0) * (((-7) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 10, 10), (100, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [t1], Original ATen: [aten.replication_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad2d_0.run(primals_1, buf0, 400, grid=grid(400), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [t2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 10, 10), (100, 100, 10, 1)) buf2 = empty_strided_cuda((4, 1, 10, 10), (100, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs] triton_poi_fused_abs_1.run(buf1, buf2, 400, grid=grid(400), stream=stream0) buf3 = empty_strided_cuda((4, 1, 10, 10), (100, 400, 10, 1), torch.float32) # Topologically Sorted 
Source Nodes: [t3], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_2.run(buf2, buf3, 400, grid=grid(400), stream=stream0) buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, t4], Original ATen: [aten.add, aten.reciprocal, aten.mul] triton_poi_fused_add_mul_reciprocal_3.run(buf3, buf4, buf8, 64, grid=grid(64), stream=stream0) del buf3 buf5 = empty_strided_cuda((4, 1, 18, 18), (324, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad2d] triton_poi_fused_replication_pad2d_4.run(buf4, buf5, 1296, grid=grid(1296), stream=stream0) # Topologically Sorted Source Nodes: [t5], Original ATen: [aten.avg_pool2d] buf6 = torch.ops.aten.avg_pool2d.default(buf5, [15, 15], [1, 1], [0, 0], False, True, None) buf7 = buf6 del buf6 return (buf7, primals_2, buf0, buf1, buf2, buf4, buf5, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class HILL(nn.Module): def __init__(self, img_size): super(HILL, self).__init__() self.img_size = img_size self.pad_3 = nn.ReplicationPad2d(3) self.pad = nn.ReplicationPad2d(7) self.conv1 = nn.Conv2d(1, 1, 3, 1, padding=1, bias=False) self.avepool1 = nn.AvgPool2d(3, stride=1, padding=1) self.avepool2 = nn.AvgPool2d(15, stride=1) self.eps = 1e-10 self.res() def res(self): self.conv1.weight.data = torch.tensor([[-1, 2, -1], [2, -4, 2], [-1, 2, -1]], dtype=torch.float).view(1, 1, 3, 3) def forward(self, x): t1 = self.pad_3(x) t2 = self.conv1(t1) t3 = self.avepool1(torch.abs(t2)) t4 = 1 / (t3[:, :, 3:self.img_size + 3, 3:self.img_size + 3] + self.eps ) t5 = self.avepool2(self.pad(t4)) return t5 def get_inputs(): return [torch.rand([4, 1, 4, 4])] def get_init_inputs(): return [[], {'img_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x2 = xindex // 100 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -3 + x1) + (-3 + x1) * (-3 + x1 > 0)) + (0 * (0 >= -3 + x1) + (-3 + x1) * (-3 + x1 > 0)) * (0 * (0 >= -3 + x1) + (-3 + x1) * (-3 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -3 + x0) + (-3 + x0) * (-3 + x0 > 0)) + (0 * (0 >= -3 + x0) + (-3 + x0) * (-3 + x0 > 0)) * (0 * (0 >= -3 + x0) + (-3 + x0) * (-3 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_abs_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.abs(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 10 % 10 x0 = xindex % 10 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-11 + x4), tmp10 & xmask, other=0.0) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-10 + x4), tmp16 & xmask, other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-9 + x4), tmp23 & xmask, other=0.0) tmp25 = tmp24 + tmp18 tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=0.0) tmp32 = tmp31 + tmp25 tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=0.0) tmp35 = tmp34 + tmp32 tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=0.0) tmp38 = tmp37 + tmp35 tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (9 + x4), tmp43 & xmask, other=0.0) tmp45 = tmp44 + tmp38 tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (10 + x4), tmp46 & xmask, other=0.0) tmp48 = tmp47 + tmp45 tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (11 + x4), tmp49 & xmask, other=0.0) tmp51 = tmp50 + tmp48 tmp52 = 1 + -1 * x0 + -1 * x1 + x0 * x1 + (11 * (11 <= 2 + x0) + (2 + x0) * (2 + x0 < 11)) * (11 * (11 <= 2 + x1) + (2 + x1) * (2 + x1 < 11) ) + -1 * x0 * (11 * (11 <= 2 + x1) + (2 + x1) * (2 + x1 < 11) ) + -1 * x1 * (11 * (11 <= 2 + x0) + (2 + x0) * (2 + x0 < 11)) + ( 11 * (11 <= 2 + x0) + (2 + x0) * (2 + x0 < 11)) + 
(11 * (11 <= 2 + x1) + (2 + x1) * (2 + x1 < 11)) tmp53 = tmp51 / tmp52 tl.store(out_ptr0 + x4, tmp53, xmask) @triton.jit def triton_poi_fused_add_mul_reciprocal_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (33 + x0 + 10 * x1 + 100 * x2), xmask) tmp1 = 1e-10 tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp2 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp7 = tmp4 * tmp4 tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_replication_pad2d_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = xindex // 18 % 18 x2 = xindex // 324 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -7 + x1) + (-7 + x1) * (-7 + x1 > 0)) + (0 * (0 >= -7 + x1) + (-7 + x1) * (-7 + x1 > 0)) * (0 * (0 >= -7 + x1) + (-7 + x1) * (-7 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -7 + x0) + (-7 + x0) * (-7 + x0 > 0)) + (0 * (0 >= -7 + x0) + (-7 + x0) * (-7 + x0 > 0)) * (0 * (0 >= -7 + x0) + (-7 + x0) * (-7 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 10, 10), (100, 100, 10, 1), torch. float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(400)](primals_1, buf0, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 10, 10), (100, 100, 10, 1)) buf2 = empty_strided_cuda((4, 1, 10, 10), (100, 100, 10, 1), torch. float32) triton_poi_fused_abs_1[grid(400)](buf1, buf2, 400, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 1, 10, 10), (100, 400, 10, 1), torch. float32) triton_poi_fused_avg_pool2d_2[grid(400)](buf2, buf3, 400, XBLOCK= 128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_reciprocal_3[grid(64)](buf3, buf4, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 buf5 = empty_strided_cuda((4, 1, 18, 18), (324, 324, 18, 1), torch. 
float32) triton_poi_fused_replication_pad2d_4[grid(1296)](buf4, buf5, 1296, XBLOCK=128, num_warps=4, num_stages=1) buf6 = torch.ops.aten.avg_pool2d.default(buf5, [15, 15], [1, 1], [0, 0], False, True, None) buf7 = buf6 del buf6 return buf7, primals_2, buf0, buf1, buf2, buf4, buf5, buf8 class HILLNew(nn.Module): def __init__(self, img_size): super(HILLNew, self).__init__() self.img_size = img_size self.pad_3 = nn.ReplicationPad2d(3) self.pad = nn.ReplicationPad2d(7) self.conv1 = nn.Conv2d(1, 1, 3, 1, padding=1, bias=False) self.avepool1 = nn.AvgPool2d(3, stride=1, padding=1) self.avepool2 = nn.AvgPool2d(15, stride=1) self.eps = 1e-10 self.res() def res(self): self.conv1.weight.data = torch.tensor([[-1, 2, -1], [2, -4, 2], [-1, 2, -1]], dtype=torch.float).view(1, 1, 3, 3) def forward(self, input_0): primals_2 = self.conv1.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
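Read end to end, the generated pipeline above is recognizably the HILL embedding-cost map from the image-steganography literature (the repo named below is StegoAdv and the module is named HILL): a high-pass residual from the fixed KB kernel K = [[-1, 2, -1], [2, -4, 2], [-1, 2, -1]], a 3x3 smoothing of its magnitude, a reciprocal, and a 15x15 smoothing of the result. As a worked equation, with replication padding, eps = 1e-10, and a crop back to img_size between the two stages:

    cost(X) = Avg_{15x15}( 1 / ( Avg_{3x3}( |X (*) K| ) + eps ) ),    (*) = 2-D cross-correlation

The five Triton kernels plus the two extern calls map onto those stages one-to-one: pad-by-3, convolution, abs, 3x3 average pool, crop + reciprocal (fused with the squared reciprocal saved for the backward pass in buf8), pad-by-7, 15x15 average pool.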
Ekko-zn/StegoAdv
HILL
false
5132
[ "MIT" ]
1
2852dbc85d66f30efb7127695c0d75806bf4aa4c
https://github.com/Ekko-zn/StegoAdv/tree/2852dbc85d66f30efb7127695c0d75806bf4aa4c
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, img_size): super().__init__() self.img_size = img_size self.pad_3 = nn.ReplicationPad2d(3) self.pad = nn.ReplicationPad2d(7) self.conv1 = nn.Conv2d(1, 1, 3, 1, padding=1, bias=False) self.avepool1 = nn.AvgPool2d(3, stride=1, padding=1) self.avepool2 = nn.AvgPool2d(15, stride=1) self.eps = 1e-10 self.res() def res(self): self.conv1.weight.data = torch.tensor([[-1, 2, -1], [2, -4, 2], [-1, 2, -1]], dtype=torch.float).view(1, 1, 3, 3) def forward(self, x): t1 = self.pad_3(x) t2 = self.conv1(t1) t3 = self.avepool1(torch.abs(t2)) t4 = 1 / (t3[:, :, 3:self.img_size + 3, 3:self.img_size + 3] + self.eps ) t5 = self.avepool2(self.pad(t4)) return t5 def get_inputs(): return [torch.rand([4, 1, 4, 4])] def get_init_inputs(): return [4]
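Records like this one can be spot-checked by running the eager module against the Inductor wrapper. The harness below is a hedged sketch, not part of the dataset: it assumes a CUDA device and that the Model and HILLNew classes from this record are importable together.

import torch

def check_hill_parity():
    torch.manual_seed(0)
    # Shapes follow the record's get_inputs()/get_init_inputs(): img_size=4,
    # a batch of 4 single-channel 4x4 inputs with contiguous strides, which
    # is exactly what call() checks via assert_size_stride.
    x = torch.rand([4, 1, 4, 4], device='cuda')
    eager = Model(4).cuda()
    fused = HILLNew(4).cuda()
    with torch.no_grad():
        ref = eager(x)
        out = fused(x)
    # The Triton path should reproduce eager float32 results to tight tolerance.
    assert torch.allclose(ref, out, atol=1e-5), (ref - out).abs().max().item()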
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/b7/cb7d56nsynjgqb2vcl5hn4pw2ygixwzygnx3fl5shcgxjnmiu5df.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone] # Source node to ATen node mapping: # logits => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vq/cvqpprnukykv7fb6t2uveui44qrapemorby5j3fnnfeymwpqwe63.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone] # Source node to ATen node mapping: # logits => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_21, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_21, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [queries], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [keys], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [values], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, buf3, 16, 4, grid=grid(16, 4), stream=stream0) buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = 
reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contexts], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, buf8, 16, 4, grid=grid(16, 4), stream=stream0) buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [contexts], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del buf9 return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
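One pattern worth calling out in the kernels above: Inductor lowers F.softmax into the standard numerically stable two-pass form. triton_poi_fused__softmax_2 re-reads each row of 4 logits, subtracts the row maximum, and exponentiates; triton_poi_fused__softmax_3 re-reads the exponentials and divides by the row sum. The same two passes in plain PyTorch (illustrative only, shapes as in this record):

import torch

logits = torch.randn(4, 4, 4, 4)
shifted = logits - logits.amax(dim=-1, keepdim=True)  # kernel 2: row max, subtract
num = shifted.exp()                                   # kernel 2: exponentiate
scores = num / num.sum(dim=-1, keepdim=True)          # kernel 3: row sum, divide
assert torch.allclose(scores, torch.softmax(logits, dim=-1))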
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed class MultiHeadAttention(nn.Module): """ input: query [N, T_q, query_dim] key [N, T_k, key_dim] output: out [N, T_q, num_units] """ def __init__(self, query_dim, key_dim, num_units, num_heads): super(MultiHeadAttention, self).__init__() if key_dim % num_heads != 0: raise ValueError( 'Key depth {} must be divisible by the number of attention heads {}' .format(key_dim, num_heads)) self.num_units = num_units self.num_heads = num_heads self.query_scale = (key_dim // num_heads) ** -0.5 self.query_linear = nn.Linear(query_dim, num_units, bias=False) self.key_linear = nn.Linear(key_dim, num_units, bias=False) self.value_linear = nn.Linear(key_dim, num_units, bias=False) def _split_heads(self, x): """ Split x so as to add an extra num_heads dimension Input: x: a Tensor with shape [batch_size, seq_len, depth] Returns: A Tensor with shape [batch_size, num_heads, seq_len, depth / num_heads] """ if len(x.shape) != 3: raise ValueError('x must have rank 3') shape = x.shape return x.view(shape[0], shape[1], self.num_heads, shape[2] // self. num_heads).permute(0, 2, 1, 3) def _merge_heads(self, x): """ Merge the extra num_heads into the last dimension Input: x: a Tensor with shape [batch_size, num_heads, seq_len, depth/num_heads] Output: A Tensor with shape [batch_size, seq_len, depth] """ if len(x.shape) != 4: raise ValueError('x must have rank 4') shape = x.shape return x.permute(0, 2, 1, 3).contiguous().view(shape[0], shape[2], shape[3] * self.num_heads) def forward(self, queries, keys, values): queries = self.query_linear(queries) keys = self.key_linear(keys) values = self.value_linear(values) queries = self._split_heads(queries) keys = self._split_heads(keys) values = self._split_heads(values) queries *= self.query_scale logits = torch.matmul(queries, keys.permute(0, 1, 3, 2)) scores = F.softmax(logits, dim=-1) contexts = torch.matmul(scores, values) contexts = self._merge_heads(contexts) return contexts def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'query_dim': 4, 'key_dim': 4, 'num_units': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, buf3, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(16, 4)](buf1, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_1[grid(16, 4)](buf2, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf9 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadAttentionNew(nn.Module): """ input: query [N, T_q, query_dim] key [N, T_k, key_dim] output: out [N, T_q, num_units] """ def __init__(self, query_dim, key_dim, num_units, num_heads): super(MultiHeadAttentionNew, self).__init__() if key_dim % num_heads != 0: raise ValueError( 'Key depth {} must be divisible by the number of attention heads {}' .format(key_dim, num_heads)) self.num_units = num_units self.num_heads = num_heads self.query_scale = (key_dim // num_heads) ** -0.5 self.query_linear = nn.Linear(query_dim, num_units, bias=False) self.key_linear = nn.Linear(key_dim, num_units, bias=False) self.value_linear = nn.Linear(key_dim, num_units, bias=False) def _split_heads(self, x): """ Split x so as to add an extra num_heads dimension Input: x: a Tensor with shape [batch_size, seq_len, depth] Returns: A Tensor
with shape [batch_size, num_heads, seq_len, depth / num_heads] """ if len(x.shape) != 3: raise ValueError('x must have rank 3') shape = x.shape return x.view(shape[0], shape[1], self.num_heads, shape[2] // self. num_heads).permute(0, 2, 1, 3) def _merge_heads(self, x): """ Merge the extra num_heads into the last dimension Input: x: a Tensor with shape [batch_size, num_heads, seq_len, depth/num_heads] Output: A Tensor with shape [batch_size, seq_len, depth] """ if len(x.shape) != 4: raise ValueError('x must have rank 4') shape = x.shape return x.permute(0, 2, 1, 3).contiguous().view(shape[0], shape[2], shape[3] * self.num_heads) def forward(self, input_0, input_1, input_2): primals_1 = self.query_linear.weight primals_3 = self.key_linear.weight primals_5 = self.value_linear.weight primals_2 = input_0 primals_4 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Ella77/tacotron2_multispeaker_pytorch
MultiHeadAttention
false
5133
[ "BSD-3-Clause" ]
1
859eab0a8e3bd7545e623ce47fe1563702d38442
https://github.com/Ella77/tacotron2_multispeaker_pytorch/tree/859eab0a8e3bd7545e623ce47fe1563702d38442
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed class Model(nn.Module): """ input: query [N, T_q, query_dim] key [N, T_k, key_dim] output: out [N, T_q, num_units] """ def __init__(self, query_dim, key_dim, num_units, num_heads): super().__init__() if key_dim % num_heads != 0: raise ValueError( 'Key depth {} must be divisible by the number of attention heads {}' .format(key_dim, num_heads)) self.num_units = num_units self.num_heads = num_heads self.query_scale = (key_dim // num_heads) ** -0.5 self.query_linear = nn.Linear(query_dim, num_units, bias=False) self.key_linear = nn.Linear(key_dim, num_units, bias=False) self.value_linear = nn.Linear(key_dim, num_units, bias=False) def _split_heads(self, x): """ Split x so as to add an extra num_heads dimension Input: x: a Tensor with shape [batch_size, seq_len, depth] Returns: A Tensor with shape [batch_size, num_heads, seq_len, depth / num_heads] """ if len(x.shape) != 3: raise ValueError('x must have rank 3') shape = x.shape return x.view(shape[0], shape[1], self.num_heads, shape[2] // self. num_heads).permute(0, 2, 1, 3) def _merge_heads(self, x): """ Merge the extra num_heads into the last dimension Input: x: a Tensor with shape [batch_size, num_heads, seq_len, depth/num_heads] Output: A Tensor with shape [batch_size, seq_len, depth] """ if len(x.shape) != 4: raise ValueError('x must have rank 4') shape = x.shape return x.permute(0, 2, 1, 3).contiguous().view(shape[0], shape[2], shape[3] * self.num_heads) def forward(self, queries, keys, values): queries = self.query_linear(queries) keys = self.key_linear(keys) values = self.value_linear(values) queries = self._split_heads(queries) keys = self._split_heads(keys) values = self._split_heads(values) queries *= self.query_scale logits = torch.matmul(queries, keys.permute(0, 1, 3, 2)) scores = F.softmax(logits, dim=-1) contexts = torch.matmul(scores, values) contexts = self._merge_heads(contexts) return contexts def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [4, 4, 4, 4]
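The same parity check works for this record, and because MultiHeadAttentionNew keeps the three projections as ordinary nn.Linear modules, its state dict is interchangeable with the eager module's. A hedged sketch (it assumes CUDA and that both classes are importable; not dataset content):

import torch

def check_attention_parity():
    torch.manual_seed(0)
    eager = Model(query_dim=4, key_dim=4, num_units=4, num_heads=4).cuda()
    fused = MultiHeadAttentionNew(4, 4, 4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # share the q/k/v projections
    q, k, v = (torch.rand([4, 4, 4], device='cuda') for _ in range(3))
    # Both paths compute softmax(Q K^T * scale) V over 4 heads of depth 1;
    # with key_dim=4 and num_heads=4 the scale is (4 // 4) ** -0.5 == 1.0,
    # which is why triton_poi_fused_clone_0 multiplies by the constant 1.0.
    with torch.no_grad():
        assert torch.allclose(eager(q, k, v), fused(q, k, v), atol=1e-5)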
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex 
x0 = xindex % 400 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # out_3 => relu_1 # out_4 => view_4 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %view_4 : [num_users=2] = 
call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = (xindex // 300) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xp/cxpqywcqam7evubfwwa5zmt733w2zov6otomgqgpramgjdsnjg5k.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # out_5 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300, ), (1, )) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 25600, grid=grid(25600), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf7, 19200, grid=grid(19200), stream=stream0) del primals_5 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_3.run(buf6, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import 
rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing class Actor(nn.Module): def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): super(Actor, self).__init__() self.fc1 = nn.Linear(nb_states, hidden1) self.fc2 = nn.Linear(hidden1, hidden2) self.fc3 = nn.Linear(hidden2, nb_actions) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, x): out = self.fc1(x) out = self.relu(out) out = self.fc2(out) out = self.relu(out) out = self.fc3(out) out = self.sigmoid(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nb_states': 4, 'nb_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300,), (1,)) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf8, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2, primals_5, buf3, buf7, 19200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK =128, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_sigmoid_3[grid(256)](buf6, primals_7, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), buf4, buf6, primals_6, buf7, primals_4, buf8 class ActorNew(nn.Module): def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): super(ActorNew, self).__init__() self.fc1 = nn.Linear(nb_states, hidden1) self.fc2 = nn.Linear(hidden1, hidden2) self.fc3 = nn.Linear(hidden2, nb_actions) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Emily0219/distiller
Actor
false
5134
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing class Model(nn.Module): def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300): super().__init__() self.fc1 = nn.Linear(nb_states, hidden1) self.fc2 = nn.Linear(hidden1, hidden2) self.fc3 = nn.Linear(hidden2, nb_actions) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, x): out = self.fc1(x) out = self.relu(out) out = self.fc2(out) out = self.relu(out) out = self.fc3(out) out = self.sigmoid(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
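A note on the oddly named relu_threshold_backward kernels in this Actor record: they are forward kernels that additionally materialize data autograd will need later. Each one computes ReLU(mm_result + bias) and also stores the boolean mask out <= 0 that aten's threshold_backward uses to zero gradients. A minimal eager restatement (my own illustration; the variable names are hypothetical):

import torch

x = torch.randn(64, 4)       # primals_3 viewed as (64, 4), as in call()
w = torch.randn(400, 4)      # fc1 weight
b = torch.randn(400)         # fc1 bias

pre = x @ w.t()              # produced by extern_kernels.mm
out = torch.relu(pre + b)    # tmp4 = maximum(0, tmp0 + tmp1), written in place
mask = out <= 0              # tmp6, saved into a separate bool buffer (buf8)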
RBFExpansion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/b5/cb5vwwuc6jprh6t63xo3xdds6dlzsjrzjlg2656esngpns4qhqa3.py # Topologically Sorted Source Nodes: [radial, pow_1, mul, exp], Original ATen: [aten.sub, aten.pow, aten.mul, aten.exp] # Source node to ATen node mapping: # exp => exp # mul => mul # pow_1 => pow_1 # radial => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -10.0), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) triton_poi_fused_exp_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_exp_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_mul_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = -10.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (300, ), (1, )) assert_size_stride(arg1_1, (4, 4, 4, 300), (4800, 1200, 300, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 300), (4800, 1200, 300, 1), torch.float32) # Topologically Sorted Source Nodes: [radial, pow_1, mul, exp], Original ATen: [aten.sub, aten.pow, aten.mul, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_mul_pow_sub_0.run(arg1_1, arg0_1, buf0, 19200, grid=grid(19200), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 300), (4800, 1200, 300, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn


class RBFExpansion(nn.Module):
    """Expand distances between nodes by radial basis functions.

    .. math::
        \\exp(- \\gamma * ||d - \\mu||^2)

    where :math:`d` is the distance between two nodes and :math:`\\mu` helps
    centralize the distances. We use multiple centers evenly distributed in
    the range of :math:`[\\text{low}, \\text{high}]` with the difference
    between two adjacent centers being :math:`gap`.

    The number of centers is decided by
    :math:`(\\text{high} - \\text{low}) / \\text{gap}`. Choosing fewer
    centers corresponds to reducing the resolution of the filter.

    Parameters
    ----------
    low : float
        Smallest center. Default to 0.
    high : float
        Largest center. Default to 30.
    gap : float
        Difference between two adjacent centers. :math:`\\gamma` will be
        computed as the reciprocal of gap. Default to 0.1.
    """

    def __init__(self, low=0.0, high=30.0, gap=0.1):
        super(RBFExpansion, self).__init__()
        num_centers = int(np.ceil((high - low) / gap))
        self.centers = np.linspace(low, high, num_centers)
        self.centers = nn.Parameter(torch.tensor(self.centers).float(),
            requires_grad=False)
        self.gamma = 1 / gap

    def reset_parameters(self):
        """Reinitialize model parameters."""
        self.centers = nn.Parameter(torch.tensor(self.centers).float(),
            requires_grad=False)

    def forward(self, edge_dists):
        """Expand distances.

        Parameters
        ----------
        edge_dists : float32 tensor of shape (E, 1)
            Distances between end nodes of edges, E for the number of edges.

        Returns
        -------
        float32 tensor of shape (E, len(self.centers))
            Expanded distances.
        """
        radial = edge_dists - self.centers
        coef = -self.gamma
        return torch.exp(coef * radial ** 2)


def get_inputs():
    return [torch.rand([4, 4, 4, 300])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_mul_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 19200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 300
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = -10.0
    tmp5 = tmp3 * tmp4
    tmp6 = tl_math.exp(tmp5)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (300,), (1,))
    assert_size_stride(arg1_1, (4, 4, 4, 300), (4800, 1200, 300, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 300), (4800, 1200, 300, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_pow_sub_0[grid(19200)](arg1_1, arg0_1,
            buf0, 19200, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class RBFExpansionNew(nn.Module):
    """Expand distances between nodes by radial basis functions.

    .. math::
        \\exp(- \\gamma * ||d - \\mu||^2)

    where :math:`d` is the distance between two nodes and :math:`\\mu` helps
    centralize the distances. We use multiple centers evenly distributed in
    the range of :math:`[\\text{low}, \\text{high}]` with the difference
    between two adjacent centers being :math:`gap`.

    The number of centers is decided by
    :math:`(\\text{high} - \\text{low}) / \\text{gap}`. Choosing fewer
    centers corresponds to reducing the resolution of the filter.

    Parameters
    ----------
    low : float
        Smallest center. Default to 0.
    high : float
        Largest center. Default to 30.
    gap : float
        Difference between two adjacent centers. :math:`\\gamma` will be
        computed as the reciprocal of gap. Default to 0.1.
    """

    def __init__(self, low=0.0, high=30.0, gap=0.1):
        super(RBFExpansionNew, self).__init__()
        num_centers = int(np.ceil((high - low) / gap))
        self.centers = np.linspace(low, high, num_centers)
        self.centers = nn.Parameter(torch.tensor(self.centers).float(),
            requires_grad=False)
        self.gamma = 1 / gap

    def reset_parameters(self):
        """Reinitialize model parameters."""
        self.centers = nn.Parameter(torch.tensor(self.centers).float(),
            requires_grad=False)

    def forward(self, input_0):
        arg0_1 = self.centers
        arg1_1 = input_0
        output = call([arg0_1, arg1_1])
        return output[0]
Erfaan-Rostami/dgl-lifesci
RBFExpansion
false
5135
[ "Apache-2.0" ]
1
08fc317f634fbaee4a8d074c332e871357845e4f
https://github.com/Erfaan-Rostami/dgl-lifesci/tree/08fc317f634fbaee4a8d074c332e871357845e4f
import torch
import numpy as np
import torch.nn as nn


class Model(nn.Module):
    """Expand distances between nodes by radial basis functions.

    .. math::
        \\exp(- \\gamma * ||d - \\mu||^2)

    where :math:`d` is the distance between two nodes and :math:`\\mu` helps
    centralize the distances. We use multiple centers evenly distributed in
    the range of :math:`[\\text{low}, \\text{high}]` with the difference
    between two adjacent centers being :math:`gap`.

    The number of centers is decided by
    :math:`(\\text{high} - \\text{low}) / \\text{gap}`. Choosing fewer
    centers corresponds to reducing the resolution of the filter.

    Parameters
    ----------
    low : float
        Smallest center. Default to 0.
    high : float
        Largest center. Default to 30.
    gap : float
        Difference between two adjacent centers. :math:`\\gamma` will be
        computed as the reciprocal of gap. Default to 0.1.
    """

    def __init__(self, low=0.0, high=30.0, gap=0.1):
        super().__init__()
        num_centers = int(np.ceil((high - low) / gap))
        self.centers = np.linspace(low, high, num_centers)
        self.centers = nn.Parameter(torch.tensor(self.centers).float(),
            requires_grad=False)
        self.gamma = 1 / gap

    def reset_parameters(self):
        """Reinitialize model parameters."""
        self.centers = nn.Parameter(torch.tensor(self.centers).float(),
            requires_grad=False)

    def forward(self, edge_dists):
        """Expand distances.

        Parameters
        ----------
        edge_dists : float32 tensor of shape (E, 1)
            Distances between end nodes of edges, E for the number of edges.

        Returns
        -------
        float32 tensor of shape (E, len(self.centers))
            Expanded distances.
        """
        radial = edge_dists - self.centers
        coef = -self.gamma
        return torch.exp(coef * radial ** 2)


def get_inputs():
    return [torch.rand([4, 4, 4, 300])]


def get_init_inputs():
    return []
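A short numeric check of the RBF expansion above, as a sketch assuming the record's RBFExpansion class is in scope: with the defaults low=0.0, high=30.0, gap=0.1 there are ceil(30 / 0.1) = 300 centers and gamma = 1 / 0.1 = 10, which matches the -10.0 coefficient and the 300-wide last axis hard-coded into the compiled kernel.

import torch

rbf = RBFExpansion()  # defaults: low=0.0, high=30.0, gap=0.1
print(rbf.centers.numel(), rbf.gamma)  # 300 10.0
d = torch.rand(4, 4, 4, 1)  # one distance per edge, broadcast against the centers
print(rbf(d).shape)  # torch.Size([4, 4, 4, 300])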
Highway
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ii/ciitic7gw336jmjrzv6vnyt7nggf2elteacsju5rdvniywfl7erg.py # Topologically Sorted Source Nodes: [H, T, mul, sub, mul_1, add], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # H => relu # T => sigmoid # add => add # mul => mul # mul_1 => mul_1 # sub => sub # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %sigmoid), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp2 * tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp4 tmp9 = tmp6 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [H, T, mul, sub, mul_1, add], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf0, buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0) return (buf2, primals_3, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class Highway(nn.Module):

    def __init__(self, in_size, out_size):
        super(Highway, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        H = self.relu(self.H(inputs))
        T = self.sigmoid(self.T(inputs))
        return H * T + inputs * (1.0 - T)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'out_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp6 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = tmp2 * tmp4
    tmp7 = 1.0
    tmp8 = tmp7 - tmp4
    tmp9 = tmp6 * tmp8
    tmp10 = tmp5 + tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf0, buf1,
            primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2, primals_3, buf0, buf1


class HighwayNew(nn.Module):

    def __init__(self, in_size, out_size):
        super(HighwayNew, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0):
        primals_1 = self.H.weight
        primals_2 = self.H.bias
        primals_4 = self.T.weight
        primals_5 = self.T.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Emotional-Text-to-Speech/tacotron_pytorch
Highway
false
5136
[ "MIT" ]
1
e6b1a3907afb01fe31bcbd77c677667adf6733f5
https://github.com/Emotional-Text-to-Speech/tacotron_pytorch/tree/e6b1a3907afb01fe31bcbd77c677667adf6733f5
import torch
from torch import nn


class Model(nn.Module):

    def __init__(self, in_size, out_size):
        super().__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        H = self.relu(self.H(inputs))
        T = self.sigmoid(self.T(inputs))
        return H * T + inputs * (1.0 - T)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
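A small sketch of the gating identity in the Highway record above, assuming its Highway class is in scope: the output is H(x) * T(x) + x * (1 - T(x)), and the T bias initialized to -1 starts the gate near sigmoid(-1) ≈ 0.27, so the freshly initialized block leans toward carrying its input through.

import torch

hw = Highway(in_size=4, out_size=4)
x = torch.rand(4, 4, 4, 4)
y = hw(x)  # ReLU branch blended with the carried input by the sigmoid gate
print(y.shape)  # torch.Size([4, 4, 4, 4])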
Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/k4/ck4kzjcxmvgsq33iwbwj5qw3beros4s2syh4khiprtffq6wpjiia.py # Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 
1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp5 = libdevice.sqrt(tmp4) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class Norm(nn.Module):
    """
    A module wrapper for vector/matrix norm
    """

    def __init__(self, p='fro', dim=None, keepdim=False):
        super(Norm, self).__init__()
        self.p = p
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, x: 'torch.Tensor'):
        return torch.norm(x, p=self.p, dim=self.dim, keepdim=self.keepdim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel,
    rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
    tmp5 = libdevice.sqrt(tmp4)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_0[grid(1)](buf1, arg0_1, 1,
            256, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class NormNew(nn.Module):
    """
    A module wrapper for vector/matrix norm
    """

    def __init__(self, p='fro', dim=None, keepdim=False):
        super(NormNew, self).__init__()
        self.p = p
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Emily0219/distiller
Norm
false
5137
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class Model(nn.Module):
    """
    A module wrapper for vector/matrix norm
    """

    def __init__(self, p='fro', dim=None, keepdim=False):
        super().__init__()
        self.p = p
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, x: 'torch.Tensor'):
        return torch.norm(x, p=self.p, dim=self.dim, keepdim=self.keepdim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
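A quick sketch of the Norm wrapper above, assuming the record's Norm class is in scope: with the defaults p='fro' and dim=None it collapses the whole tensor to one scalar, which is what the compiled kernel does (a single 256-element sum of squares followed by a sqrt).

import torch

x = torch.rand(4, 4, 4, 4)
print(Norm()(x))  # scalar Frobenius norm over all 256 elements
print(Norm(dim=-1)(x).shape)  # per-vector norms: torch.Size([4, 4, 4])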
ModelWithDuplicates
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/x7/cx7zib5vfcs4tjugjecjyojpxio3h7wkcy5bqp7pc5phvne4zdgj.py # Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => relu # x_2 => tanh # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%relu,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_tanh_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_tanh_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_tanh_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_tanh_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 144000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 10 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = libdevice.tanh(tmp4) tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + (x3), tmp5, xmask) tl.store(out_ptr1 + (x0 + (3712*x4)), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sl/cslyor46ejkl5lvclqvfd2qnnvpo2y3hutdhtpmver5xbwv2l3ek.py # Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => convolution_1 # x_4 => relu_1 # x_5 => tanh_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%tanh, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%relu_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_tanh_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_tanh_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_tanh_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_tanh_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 269120 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3364) % 20 x0 = xindex % 3364 x4 = (xindex // 3364) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = libdevice.tanh(tmp4) tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + (x3), tmp5, xmask) tl.store(out_ptr1 + (x0 + (3456*x4)), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (10, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_5, (20, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 10, 60, 60), (36000, 3600, 60, 1), torch.float32) buf5 = empty_strided_cuda((4, 10, 60, 60), (37120, 3712, 60, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_tanh_threshold_backward_0.run(buf0, primals_2, buf1, buf5, 144000, grid=grid(144000), stream=stream0) del buf0 del primals_2 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 20, 58, 58), (67280, 3364, 58, 1)) buf3 = empty_strided_cuda((4, 20, 58, 58), (67280, 3364, 58, 1), torch.float32) buf4 = empty_strided_cuda((4, 20, 58, 58), (69120, 3456, 58, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward] triton_poi_fused_convolution_relu_tanh_threshold_backward_1.run(buf2, primals_5, buf3, buf4, 269120, grid=grid(269120), stream=stream0) del buf2 del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((10, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((20, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from collections import OrderedDict
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class ModelWithDuplicates(nn.Module):

    def __init__(self):
        super(ModelWithDuplicates, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, 5)
        self.post_conv1 = nn.ModuleList([nn.ReLU(), nn.Tanh()])
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.post_conv2 = self.post_conv1
        self.expected_mlist_to_dmlist = OrderedDict([('post_conv1', [
            'post_conv1']), ('post_conv2', ['post_conv2'])])
        self.expected_list_contents_name_changes = OrderedDict([(
            'post_conv1.0', 'post_conv1_0'), ('post_conv1.1',
            'post_conv1_1'), ('post_conv2.0', 'post_conv2_0'), (
            'post_conv2.1', 'post_conv2_1')])

    def forward(self, x):
        x = self.conv1(x)
        for m in self.post_conv1:
            x = m(x)
        x = self.conv2(x)
        for m in self.post_conv2:
            x = m(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from collections import OrderedDict
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_0(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 144000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3600 % 10
    x0 = xindex % 3600
    x4 = xindex // 3600
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = libdevice.tanh(tmp4)
    tmp6 = 0.0
    tmp7 = tmp4 <= tmp6
    tl.store(out_ptr0 + x3, tmp5, xmask)
    tl.store(out_ptr1 + (x0 + 3712 * x4), tmp7, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_1(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 269120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3364 % 20
    x0 = xindex % 3364
    x4 = xindex // 3364
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = libdevice.tanh(tmp4)
    tmp6 = 0.0
    tmp7 = tmp4 <= tmp6
    tl.store(out_ptr0 + x3, tmp5, xmask)
    tl.store(out_ptr1 + (x0 + 3456 * x4), tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (10,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
    assert_size_stride(primals_5, (20,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
        buf1 = empty_strided_cuda((4, 10, 60, 60), (36000, 3600, 60, 1),
            torch.float32)
        buf5 = empty_strided_cuda((4, 10, 60, 60), (37120, 3712, 60, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_tanh_threshold_backward_0[grid(
            144000)](buf0, primals_2, buf1, buf5, 144000, XBLOCK=512,
            num_warps=8, num_stages=1)
        del buf0
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 20, 58, 58), (67280, 3364, 58, 1))
        buf3 = empty_strided_cuda((4, 20, 58, 58), (67280, 3364, 58, 1),
            torch.float32)
        buf4 = empty_strided_cuda((4, 20, 58, 58), (69120, 3456, 58, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_tanh_threshold_backward_1[grid(
            269120)](buf2, primals_5, buf3, buf4, 269120, XBLOCK=512,
            num_warps=8, num_stages=1)
        del buf2
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf5


class ModelWithDuplicatesNew(nn.Module):

    def __init__(self):
        super(ModelWithDuplicatesNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, 5)
        self.post_conv1 = nn.ModuleList([nn.ReLU(), nn.Tanh()])
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.post_conv2 = self.post_conv1
        self.expected_mlist_to_dmlist = OrderedDict([('post_conv1', [
            'post_conv1']), ('post_conv2', ['post_conv2'])])
        self.expected_list_contents_name_changes = OrderedDict([(
            'post_conv1.0', 'post_conv1_0'), ('post_conv1.1',
            'post_conv1_1'), ('post_conv2.0', 'post_conv2_0'), (
            'post_conv2.1', 'post_conv2_1')])

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Emily0219/distiller
ModelWithDuplicates
false
5138
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch
from collections import OrderedDict
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, 5)
        self.post_conv1 = nn.ModuleList([nn.ReLU(), nn.Tanh()])
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.post_conv2 = self.post_conv1
        self.expected_mlist_to_dmlist = OrderedDict([('post_conv1', [
            'post_conv1']), ('post_conv2', ['post_conv2'])])
        self.expected_list_contents_name_changes = OrderedDict([(
            'post_conv1.0', 'post_conv1_0'), ('post_conv1.1',
            'post_conv1_1'), ('post_conv2.0', 'post_conv2_0'), (
            'post_conv2.1', 'post_conv2_1')])

    def forward(self, x):
        x = self.conv1(x)
        for m in self.post_conv1:
            x = m(x)
        x = self.conv2(x)
        for m in self.post_conv2:
            x = m(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return []
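One detail of the record above worth spelling out, in a sketch that assumes its Model class is in scope: post_conv2 is the very same ModuleList object as post_conv1, so the ReLU/Tanh pair runs after both convolutions, and the unpadded 5x5 then 3x3 convs shrink the 64x64 input to 60x60 and then 58x58, matching the buffer shapes asserted in the compiled call().

import torch

m = Model()
print(m.post_conv1 is m.post_conv2)  # True: one shared ModuleList
y = m(torch.rand(4, 3, 64, 64))
print(y.shape)  # torch.Size([4, 20, 58, 58])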
Mean
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vz/cvzdeyzbjmguyc7weo3g2iu6knqdlesduaneomlvq4mxjrspo75o.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 
tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class Mean(nn.Module):

    def __init__(self, *args, **kwargs):
        super(Mean, self).__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x: 'torch.Tensor'):
        return torch.mean(x, *self.args, **self.kwargs)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
    tmp4 = 256.0
    tmp5 = tmp3 / tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2,
            num_stages=1)
        del arg0_1
    return buf1,


class MeanNew(nn.Module):

    def __init__(self, *args, **kwargs):
        super(MeanNew, self).__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Emily0219/distiller
Mean
false
5139
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing


class Model(nn.Module):

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x: 'torch.Tensor'):
        return torch.mean(x, *self.args, **self.kwargs)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
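A usage sketch for the Mean wrapper above, assuming the record's Mean class is in scope: the constructor captures *args/**kwargs and forwards them to torch.mean, so the compiled specialization in this record is simply the argument-free grand mean over all 256 elements.

import torch

x = torch.rand(4, 4, 4, 4)
print(Mean()(x))  # scalar mean of all 256 elements
print(Mean(dim=1, keepdim=True)(x).shape)  # torch.Size([4, 1, 4, 4])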
upsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/4p/c4pr5t22xrnsppbmsbt5ji55ctckvbqi56wpifug7vayzxbbqpdw.py # Topologically Sorted Source Nodes: [interpolate, add, truediv], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # add => add_5 # interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4 # truediv => div # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (256,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 0.011764705882352941), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = 
call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_5, 2), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 256) % 256 x0 = xindex % 256 x2 = (xindex // 65536) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.011764705882352941 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), None, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), None, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = 1.0 tmp25 = triton_helpers.minimum(tmp23, tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp16 + tmp26 tmp28 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), None, eviction_policy='evict_last') tmp30 = tmp29 - tmp28 tmp31 = tmp30 * tmp25 tmp32 = tmp28 + tmp31 tmp33 = tmp27 - tmp32 tmp34 = tmp6.to(tl.float32) tmp35 = tmp5 - tmp34 tmp36 = triton_helpers.maximum(tmp35, tmp4) tmp37 = triton_helpers.minimum(tmp36, tmp24) tmp38 = tmp33 * tmp37 tmp39 = tmp32 + tmp38 tmp40 = tmp39 + tmp24 tmp41 = 0.5 tmp42 = tmp40 * tmp41 tl.store(in_out_ptr0 + (x3), tmp42, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 256, 256), (262144, 65536, 256, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [interpolate, add, truediv], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0.run(buf2, arg0_1, 1048576, grid=grid(1048576), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class upsample(nn.Module): def __init__(self): super(upsample, self).__init__() self.upsample = torch.nn.UpsamplingBilinear2d([256, 256]) def forward(self, input): return (self.upsample(input) + 1.0) / 2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.011764705882352941 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), None, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), None, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = 1.0 tmp25 = triton_helpers.minimum(tmp23, tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp16 + tmp26 tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), None, eviction_policy='evict_last') tmp30 = tmp29 - tmp28 tmp31 = tmp30 * tmp25 tmp32 = tmp28 + tmp31 tmp33 = tmp27 - tmp32 tmp34 = tmp6.to(tl.float32) tmp35 = tmp5 - tmp34 tmp36 = triton_helpers.maximum(tmp35, tmp4) tmp37 = triton_helpers.minimum(tmp36, tmp24) tmp38 = tmp33 * tmp37 tmp39 = tmp32 + tmp38 tmp40 = tmp39 + tmp24 tmp41 = 0.5 tmp42 = tmp40 * tmp41 tl.store(in_out_ptr0 + x3, tmp42, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 256, 256), (262144, 65536, 256, 1), torch.float32) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_0[ grid(1048576)](buf2, arg0_1, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del arg0_1 return buf2, class upsampleNew(nn.Module): def __init__(self): super(upsampleNew, self).__init__() self.upsample = torch.nn.UpsamplingBilinear2d([256, 256]) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Euiyeon-Kim/SuperFAN-Pytorch
upsample
false
5140
[ "MIT" ]
1
4a18e559c4b91d0d422b66e63509aeea8a7dc8f2
https://github.com/Euiyeon-Kim/SuperFAN-Pytorch/tree/4a18e559c4b91d0d422b66e63509aeea8a7dc8f2
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.upsample = torch.nn.UpsamplingBilinear2d([256, 256]) def forward(self, input): return (self.upsample(input) + 1.0) / 2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
ClippedLinearQuantization
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zp/czptdn7jot5nhvvxjgbeji75wpdaf2gpuyywmstgg3fjdtzgdimv.py # Topologically Sorted Source Nodes: [input_1, mul, sub, output, add, output_1], Original ATen: [aten.clamp, aten.mul, aten.sub, aten.round, aten.add, aten.div] # Source node to ATen node mapping: # add => add # input_1 => clamp_max, clamp_min # mul => mul # output => round_1 # output_1 => div # sub => sub # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 3.75), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.0), kwargs = {}) # %round_1 : [num_users=1] = call_function[target=torch.ops.aten.round.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%round_1, 0.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 3.75), kwargs = {}) triton_poi_fused_add_clamp_div_mul_round_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_round_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_round_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_round_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 4.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = 3.75 tmp6 = tmp4 * tmp5 tmp7 = tmp6 - tmp1 tmp8 = libdevice.nearbyint(tmp7) tmp9 = tmp8 + tmp1 tmp10 = 0.26666666666666666 tmp11 = tmp9 * tmp10 tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, mul, sub, output, add, output_1], Original ATen: [aten.clamp, aten.mul, aten.sub, aten.round, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_div_mul_round_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing def linear_dequantize(input, scale, zero_point, inplace=False): if inplace: input.add_(zero_point).div_(scale) return input return (input + zero_point) / scale def linear_quantize(input, scale, zero_point, inplace=False): if inplace: input.mul_(scale).sub_(zero_point).round_() return input return torch.round(scale * input - zero_point) def _prep_saturation_val_tensor(sat_val): is_scalar = not isinstance(sat_val, torch.Tensor) out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach() if not out.is_floating_point(): out = out if out.dim() == 0: out = out.unsqueeze(0) return is_scalar, out def asymmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, integral_zero_point=True, signed=False): scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min) scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max) is_scalar = scalar_min and scalar_max if scalar_max and not scalar_min: sat_max = sat_max elif scalar_min and not scalar_max: sat_min = sat_min if any(sat_min > sat_max): raise ValueError('saturation_min must be smaller than saturation_max') n = 2 ** num_bits - 1 sat_min = torch.min(sat_min, torch.zeros_like(sat_min)) sat_max = torch.max(sat_max, torch.zeros_like(sat_max)) diff = sat_max - sat_min diff[diff == 0] = n scale = n / diff zero_point = scale * sat_min if integral_zero_point: zero_point = zero_point.round() if signed: zero_point += 2 ** (num_bits - 1) if is_scalar: return scale.item(), zero_point.item() return scale, zero_point def clamp(input, min, max, inplace=False): if inplace: input.clamp_(min, max) return input return torch.clamp(input, min, max) class LinearQuantizeSTE(torch.autograd.Function): @staticmethod def forward(ctx, input, scale, zero_point, dequantize, inplace): if inplace: ctx.mark_dirty(input) output = linear_quantize(input, scale, zero_point, inplace) if dequantize: output = linear_dequantize(output, scale, zero_point, inplace) return output @staticmethod def backward(ctx, grad_output): return grad_output, None, None, None, None class ClippedLinearQuantization(nn.Module): def __init__(self, num_bits, clip_val, dequantize=True, inplace=False): super(ClippedLinearQuantization, self).__init__() self.num_bits = num_bits self.clip_val = clip_val self.scale, self.zero_point = asymmetric_linear_quantization_params( num_bits, 0, clip_val, signed=False) self.dequantize = dequantize self.inplace = inplace def forward(self, input): input = clamp(input, 0, self.clip_val, self.inplace) input = LinearQuantizeSTE.apply(input, self.scale, self.zero_point, self.dequantize, self.inplace) return input def __repr__(self): inplace_str = ', inplace' if self.inplace else '' return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__. __name__, self.num_bits, self.clip_val, inplace_str) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_bits': 4, 'clip_val': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_div_mul_round_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 4.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = 3.75 tmp6 = tmp4 * tmp5 tmp7 = tmp6 - tmp1 tmp8 = libdevice.nearbyint(tmp7) tmp9 = tmp8 + tmp1 tmp10 = 0.26666666666666666 tmp11 = tmp9 * tmp10 tl.store(out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_div_mul_round_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def linear_dequantize(input, scale, zero_point, inplace=False): if inplace: input.add_(zero_point).div_(scale) return input return (input + zero_point) / scale def linear_quantize(input, scale, zero_point, inplace=False): if inplace: input.mul_(scale).sub_(zero_point).round_() return input return torch.round(scale * input - zero_point) def _prep_saturation_val_tensor(sat_val): is_scalar = not isinstance(sat_val, torch.Tensor) out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach() if not out.is_floating_point(): out = out if out.dim() == 0: out = out.unsqueeze(0) return is_scalar, out def asymmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, integral_zero_point=True, signed=False): scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min) scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max) is_scalar = scalar_min and scalar_max if scalar_max and not scalar_min: sat_max = sat_max elif scalar_min and not scalar_max: sat_min = sat_min if any(sat_min > sat_max): raise ValueError('saturation_min must be smaller than saturation_max') n = 2 ** num_bits - 1 sat_min = torch.min(sat_min, torch.zeros_like(sat_min)) sat_max = torch.max(sat_max, torch.zeros_like(sat_max)) diff = sat_max - sat_min diff[diff == 0] = n scale = n / diff zero_point = scale * sat_min if integral_zero_point: zero_point = zero_point.round() if signed: zero_point += 2 ** (num_bits - 1) if is_scalar: return scale.item(), zero_point.item() return scale, zero_point def clamp(input, min, max, inplace=False): if inplace: input.clamp_(min, max) return input return torch.clamp(input, min, max) class LinearQuantizeSTE(torch.autograd.Function): @staticmethod def forward(ctx, input, scale, zero_point, dequantize, inplace): if inplace: ctx.mark_dirty(input) output = linear_quantize(input, scale, zero_point, inplace) if dequantize: output = linear_dequantize(output, scale, zero_point, inplace) return output @staticmethod def 
backward(ctx, grad_output): return grad_output, None, None, None, None class ClippedLinearQuantizationNew(nn.Module): def __init__(self, num_bits, clip_val, dequantize=True, inplace=False): super(ClippedLinearQuantizationNew, self).__init__() self.num_bits = num_bits self.clip_val = clip_val self.scale, self.zero_point = asymmetric_linear_quantization_params( num_bits, 0, clip_val, signed=False) self.dequantize = dequantize self.inplace = inplace def __repr__(self): inplace_str = ', inplace' if self.inplace else '' return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__. __name__, self.num_bits, self.clip_val, inplace_str) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Emily0219/distiller
ClippedLinearQuantization
false
5141
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler import torch.onnx import torch.testing def linear_dequantize(input, scale, zero_point, inplace=False): if inplace: input.add_(zero_point).div_(scale) return input return (input + zero_point) / scale def linear_quantize(input, scale, zero_point, inplace=False): if inplace: input.mul_(scale).sub_(zero_point).round_() return input return torch.round(scale * input - zero_point) def _prep_saturation_val_tensor(sat_val): is_scalar = not isinstance(sat_val, torch.Tensor) out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach() if not out.is_floating_point(): out = out if out.dim() == 0: out = out.unsqueeze(0) return is_scalar, out def asymmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, integral_zero_point=True, signed=False): scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min) scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max) is_scalar = scalar_min and scalar_max if scalar_max and not scalar_min: sat_max = sat_max elif scalar_min and not scalar_max: sat_min = sat_min if any(sat_min > sat_max): raise ValueError('saturation_min must be smaller than saturation_max') n = 2 ** num_bits - 1 sat_min = torch.min(sat_min, torch.zeros_like(sat_min)) sat_max = torch.max(sat_max, torch.zeros_like(sat_max)) diff = sat_max - sat_min diff[diff == 0] = n scale = n / diff zero_point = scale * sat_min if integral_zero_point: zero_point = zero_point.round() if signed: zero_point += 2 ** (num_bits - 1) if is_scalar: return scale.item(), zero_point.item() return scale, zero_point def clamp(input, min, max, inplace=False): if inplace: input.clamp_(min, max) return input return torch.clamp(input, min, max) class LinearQuantizeSTE(torch.autograd.Function): @staticmethod def forward(ctx, input, scale, zero_point, dequantize, inplace): if inplace: ctx.mark_dirty(input) output = linear_quantize(input, scale, zero_point, inplace) if dequantize: output = linear_dequantize(output, scale, zero_point, inplace) return output @staticmethod def backward(ctx, grad_output): return grad_output, None, None, None, None class Model(nn.Module): def __init__(self, num_bits, clip_val, dequantize=True, inplace=False): super().__init__() self.num_bits = num_bits self.clip_val = clip_val self.scale, self.zero_point = asymmetric_linear_quantization_params( num_bits, 0, clip_val, signed=False) self.dequantize = dequantize self.inplace = inplace def forward(self, input): input = clamp(input, 0, self.clip_val, self.inplace) input = LinearQuantizeSTE.apply(input, self.scale, self.zero_point, self.dequantize, self.inplace) return input def __repr__(self): inplace_str = ', inplace' if self.inplace else '' return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__. __name__, self.num_bits, self.clip_val, inplace_str) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
BahdanauAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.clone] # Source node to ATen node mapping: # processed_query => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_4/inductor_cache/bg/cbgmsaps4ljzc6rkbd4imsj3jo73tgvkd46dy7obklnnvintmaea.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mv] # Source node to ATen node mapping: # out => mul, sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %primals_5), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mv_1 = async_compile.triton('triton_poi_fused_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mv_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*(x0 // 4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + ((4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (1)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp15 = tl.load(in_ptr0 + (2 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (2)) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp23 = tl.load(in_ptr0 + (3 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (3 + (4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (3)) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp6 = tmp3 * tmp5 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp13 = tmp10 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp21 = tmp18 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 + tmp24 tmp26 = libdevice.tanh(tmp25) tmp29 = tmp26 * tmp28 tmp30 = tmp22 + tmp29 
tl.store(out_ptr0 + (x0), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_normalized => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_normalized => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_key], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(primals_1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_key], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 
4), 0), out=buf3) del primals_4 buf4 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mv] triton_poi_fused_mv_1.run(buf1, buf3, primals_5, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out=buf7) return (reinterpret_tensor(buf7, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (4, 16, 1), 0), primals_5, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf3, buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler from torch.nn.parameter import Parameter import torch.onnx import torch.testing class EltwiseAdd(nn.Module): def __init__(self, inplace=False): super(EltwiseAdd, self).__init__() self.inplace = inplace def forward(self, *input): res = input[0] if self.inplace: for t in input[1:]: res += t else: for t in input[1:]: res = res + t return res class EltwiseMult(nn.Module): def __init__(self, inplace=False): super(EltwiseMult, self).__init__() self.inplace = inplace def forward(self, *input): res = input[0] if self.inplace: for t in input[1:]: res *= t else: for t in input[1:]: res = res * t return res class Matmul(nn.Module): """ A wrapper module for matmul operation between 2 tensors. """ def __init__(self): super(Matmul, self).__init__() def forward(self, a: 'torch.Tensor', b: 'torch.Tensor'): return a.matmul(b) class BatchMatmul(nn.Module): """ A wrapper module for torch.bmm operation between 2 tensors. """ def __init__(self): super(BatchMatmul, self).__init__() def forward(self, a: 'torch.Tensor', b: 'torch.Tensor'): return torch.bmm(a, b) class BahdanauAttention(nn.Module): """ It should be very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=False, dropout=0, batch_first=False): super(BahdanauAttention, self).__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) self.mask = None self.eltwiseadd_qk = EltwiseAdd() self.eltwiseadd_norm_bias = EltwiseAdd() self.eltwisemul_norm_scaler = EltwiseMult() self.tanh = nn.Tanh() self.matmul_score = Matmul() self.softmax_att = nn.Softmax(dim=-1) self.context_matmul = BatchMatmul() if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device= context.device) self.mask = indices >= context_len.unsqueeze(1) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = self.eltwiseadd_qk(att_query, att_keys) if self.normalize: sum_qk = self.eltwiseadd_norm_bias(sum_qk, self.normalize_bias) tmp = self.linear_att linear_att = tmp / tmp.norm() linear_att = linear_att linear_att = 
self.eltwisemul_norm_scaler(linear_att, self. normalize_scalar) else: linear_att = self.linear_att out = self.matmul_score(self.tanh(sum_qk), linear_att) return out def forward(self, query, keys): """ :param query: if batch_first: (b x t_q x n) else: (t_q x b x n) :param keys: if batch_first: (b x t_k x n) else (t_k x b x n) :returns: (context, scores_normalized) context: if batch_first: (b x t_q x n) else (t_q x b x n) scores_normalized: if batch_first (b x t_q x t_k) else (t_q x b x t_k) """ if not self.batch_first: keys = keys.transpose(0, 1) if query.dim() == 3: query = query.transpose(0, 1) if query.dim() == 2: single_query = True query = query.unsqueeze(1) else: single_query = False b = query.size(0) t_k = keys.size(1) t_q = query.size(1) processed_query = self.linear_q(query) processed_key = self.linear_k(keys) scores = self.calc_score(processed_query, processed_key) if self.mask is not None: mask = self.mask.unsqueeze(1).expand(b, t_q, t_k) scores.data.masked_fill_(mask, -65504.0) scores_normalized = self.softmax_att(scores) scores_normalized = self.dropout(scores_normalized) context = self.context_matmul(scores_normalized, keys) if single_query: context = context.squeeze(1) scores_normalized = scores_normalized.squeeze(1) elif not self.batch_first: context = context.transpose(0, 1) scores_normalized = scores_normalized.transpose(0, 1) return context, scores_normalized def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'query_size': 4, 'key_size': 4, 'num_units': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler from torch.nn.parameter import Parameter import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * (x0 // 4), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + (4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp15 = tl.load(in_ptr0 + (2 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + 2) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp23 = tl.load(in_ptr0 + (3 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr1 + (3 + 4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + 3) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp6 = tmp3 * tmp5 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp13 = tmp10 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp21 = tmp18 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 + tmp24 tmp26 = libdevice.tanh(tmp25) tmp29 = tmp26 * tmp28 tmp30 = tmp22 + tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) 
tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused_mv_1[grid(64)](buf1, buf3, primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(primals_1, (4, 4, 4), ( 4, 16, 1), 0), out=buf7) return reinterpret_tensor(buf7, (4, 4, 4), (4, 16, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4), (4, 16, 1), 0 ), primals_5, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), buf3, buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0) class EltwiseAdd(nn.Module): def __init__(self, inplace=False): super(EltwiseAdd, self).__init__() self.inplace = inplace def forward(self, *input): res = input[0] if self.inplace: for t in input[1:]: res += t else: for t in input[1:]: res = res + t return res class EltwiseMult(nn.Module): def __init__(self, inplace=False): super(EltwiseMult, self).__init__() self.inplace = inplace def forward(self, *input): res = input[0] if self.inplace: for t in input[1:]: res *= t else: for t in input[1:]: res = res * t return res class Matmul(nn.Module): """ A wrapper module for matmul operation between 2 tensors. 
""" def __init__(self): super(Matmul, self).__init__() def forward(self, a: 'torch.Tensor', b: 'torch.Tensor'): return a.matmul(b) class BatchMatmul(nn.Module): """ A wrapper module for torch.bmm operation between 2 tensors. """ def __init__(self): super(BatchMatmul, self).__init__() def forward(self, a: 'torch.Tensor', b: 'torch.Tensor'): return torch.bmm(a, b) class BahdanauAttentionNew(nn.Module): """ It should be very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=False, dropout=0, batch_first=False): super(BahdanauAttentionNew, self).__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) self.mask = None self.eltwiseadd_qk = EltwiseAdd() self.eltwiseadd_norm_bias = EltwiseAdd() self.eltwisemul_norm_scaler = EltwiseMult() self.tanh = nn.Tanh() self.matmul_score = Matmul() self.softmax_att = nn.Softmax(dim=-1) self.context_matmul = BatchMatmul() if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device= context.device) self.mask = indices >= context_len.unsqueeze(1) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = self.eltwiseadd_qk(att_query, att_keys) if self.normalize: sum_qk = self.eltwiseadd_norm_bias(sum_qk, self.normalize_bias) tmp = self.linear_att linear_att = tmp / tmp.norm() linear_att = linear_att linear_att = self.eltwisemul_norm_scaler(linear_att, self. normalize_scalar) else: linear_att = self.linear_att out = self.matmul_score(self.tanh(sum_qk), linear_att) return out def forward(self, input_0, input_1): primals_5 = self.linear_att primals_3 = self.linear_q.weight primals_4 = self.linear_k.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
Emily0219/distiller
BahdanauAttention
false
5142
[ "Apache-2.0" ]
1
445ed35b671fb54586acc280b53d951f18bf97ae
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
import math import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.optim.lr_scheduler import * import torch.optim.lr_scheduler from torch.nn.parameter import Parameter import torch.onnx import torch.testing class EltwiseAdd(nn.Module): def __init__(self, inplace=False): super().__init__() self.inplace = inplace def forward(self, *input): res = input[0] if self.inplace: for t in input[1:]: res += t else: for t in input[1:]: res = res + t return res class EltwiseMult(nn.Module): def __init__(self, inplace=False): super().__init__() self.inplace = inplace def forward(self, *input): res = input[0] if self.inplace: for t in input[1:]: res *= t else: for t in input[1:]: res = res * t return res class Matmul(nn.Module): """ A wrapper module for matmul operation between 2 tensors. """ def __init__(self): super().__init__() def forward(self, a: 'torch.Tensor', b: 'torch.Tensor'): return a.matmul(b) class BatchMatmul(nn.Module): """ A wrapper module for torch.bmm operation between 2 tensors. """ def __init__(self): super().__init__() def forward(self, a: 'torch.Tensor', b: 'torch.Tensor'): return torch.bmm(a, b) class Model(nn.Module): """ It should be very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=False, dropout=0, batch_first=False): super().__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) self.mask = None self.eltwiseadd_qk = EltwiseAdd() self.eltwiseadd_norm_bias = EltwiseAdd() self.eltwisemul_norm_scaler = EltwiseMult() self.tanh = nn.Tanh() self.matmul_score = Matmul() self.softmax_att = nn.Softmax(dim=-1) self.context_matmul = BatchMatmul() if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device= context.device) self.mask = indices >= context_len.unsqueeze(1) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).e # ... truncated (>4000 chars) for memory efficiency
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_2 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_2 => div_1, 
sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # 
Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf1, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf0, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [dot], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [dot_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf9, buf10, 256, grid=grid(256), stream=stream0) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf11) del primals_6 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) 
if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
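Editorial note: triton_poi_fused__softmax_1 and triton_poi_fused__softmax_2 above implement the numerically stable ("safe") softmax with the 1/sqrt(k) scale (0.5 for k = 4) folded into the exponent: the first kernel subtracts the row max, scales, and exponentiates; the second normalizes by the row sum. A plain PyTorch sketch of the same computation (illustrative only):

import torch
import torch.nn.functional as F

dot = torch.rand(16, 4, 4)
shifted = (dot - dot.amax(dim=2, keepdim=True)) * 0.5  # kernel 1: subtract row max, scale ...
e = shifted.exp()                                      # ... and exponentiate
out = e / e.sum(dim=2, keepdim=True)                   # kernel 2: normalize by the row sum
assert torch.allclose(out, F.softmax(dot * 0.5, dim=2), atol=1e-6)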
import torch import torch.nn.functional as F from torch import nn class SelfAttention(nn.Module): def __init__(self, embedding_dimension, num_heads): super().__init__() assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}' self.num_heads = num_heads k = embedding_dimension self.to_keys = nn.Linear(k, k * num_heads, bias=False) self.to_queries = nn.Linear(k, k * num_heads, bias=False) self.to_values = nn.Linear(k, k * num_heads, bias=False) self.unify_heads = nn.Linear(num_heads * k, k) def forward(self, x): b, t, k = x.size() h = self.num_heads keys = self.to_keys(x).view(b, t, h, k) queries = self.to_queries(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) keys = keys.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries.transpose(1, 2).contiguous().view(b * h, t, k) values = values.transpose(1, 2).contiguous().view(b * h, t, k) dot = torch.bmm(queries, keys.transpose(1, 2)) dot = dot / k ** (1 / 2) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, k) out = out.transpose(1, 2).contiguous().view(b, t, h * k) return self.unify_heads(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embedding_dimension': 4, 'num_heads': 4}]
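Editorial note: a minimal usage sketch matching get_inputs()/get_init_inputs() above; SelfAttention maps (batch, tokens, embedding) to the same shape.

import torch

model = SelfAttention(embedding_dimension=4, num_heads=4)
x = torch.rand(4, 4, 4)   # (batch, tokens, embedding), as in get_inputs()
y = model(x)
assert y.shape == (4, 4, 4)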
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (4, 16), (16, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, 
(16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clone_0[grid(256)](buf0, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_0[grid(256)](buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf11) del primals_6 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, embedding_dimension, num_heads): super().__init__() assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}' self.num_heads = num_heads k = embedding_dimension self.to_keys = nn.Linear(k, k * num_heads, bias=False) self.to_queries = nn.Linear(k, k * num_heads, bias=False) self.to_values = nn.Linear(k, k * num_heads, bias=False) self.unify_heads = nn.Linear(num_heads * k, k) def forward(self, input_0): primals_2 = self.to_keys.weight primals_3 = self.to_queries.weight primals_4 = self.to_values.weight primals_5 = self.unify_heads.weight primals_6 = self.unify_heads.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Ensembl/gene_pcp
SelfAttention
false
5143
[ "Apache-2.0" ]
1
121be9895d414da3f13b5c8ec7588754e03336e1
https://github.com/Ensembl/gene_pcp/tree/121be9895d414da3f13b5c8ec7588754e03336e1
import torch import torch.nn.functional as F from torch import nn class Model(nn.Module): def __init__(self, embedding_dimension, num_heads): super().__init__() assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}' self.num_heads = num_heads k = embedding_dimension self.to_keys = nn.Linear(k, k * num_heads, bias=False) self.to_queries = nn.Linear(k, k * num_heads, bias=False) self.to_values = nn.Linear(k, k * num_heads, bias=False) self.unify_heads = nn.Linear(num_heads * k, k) def forward(self, x): b, t, k = x.size() h = self.num_heads keys = self.to_keys(x).view(b, t, h, k) queries = self.to_queries(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) keys = keys.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries.transpose(1, 2).contiguous().view(b * h, t, k) values = values.transpose(1, 2).contiguous().view(b * h, t, k) dot = torch.bmm(queries, keys.transpose(1, 2)) dot = dot / k ** (1 / 2) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, k) out = out.transpose(1, 2).contiguous().view(b, t, h * k) return self.unify_heads(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
AffineChannel2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fb/cfbeyr3lhfnhw7ca27iubsdbjxh3gnvnzbr2oxoqiwodjw5uc7dc.py # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_1), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
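Editorial note: in triton_poi_fused_add_mul_0 above, x1 = (xindex // 16) % 4 recovers the channel of a flat index into a contiguous (N=4, C=4, H=4, W=4) tensor, i.e. channel = (flat // (H * W)) % C, which is how the per-channel weight and bias are indexed. A quick illustration:

import torch

N = C = H = W = 4
flat = torch.arange(N * C * H * W)
channel = (flat // (H * W)) % C          # same mapping as x1 in the kernel
assert channel[0] == 0 and channel[16] == 1 and channel[64] == 0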
import torch import torch.nn as nn class AffineChannel2d(nn.Module): """ A simple channel-wise affine transformation operation """ def __init__(self, num_features): super().__init__() self.num_features = num_features self.weight = nn.Parameter(torch.Tensor(num_features)) self.bias = nn.Parameter(torch.Tensor(num_features)) self.weight.data.uniform_() self.bias.data.zero_() def forward(self, x): return x * self.weight.view(1, self.num_features, 1, 1 ) + self.bias.view(1, self.num_features, 1, 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
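Editorial note: a small check (illustrative, reusing the module defined above) that AffineChannel2d is exactly the per-channel map y[:, c] = weight[c] * x[:, c] + bias[c], broadcast over (N, C, H, W):

import torch

m = AffineChannel2d(num_features=4)
x = torch.rand(4, 4, 4, 4)
y = m(x)
c = 2  # any channel index
assert torch.allclose(y[:, c], x[:, c] * m.weight[c] + m.bias[c])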
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class AffineChannel2dNew(nn.Module): """ A simple channel-wise affine transformation operation """ def __init__(self, num_features): super().__init__() self.num_features = num_features self.weight = nn.Parameter(torch.Tensor(num_features)) self.bias = nn.Parameter(torch.Tensor(num_features)) self.weight.data.uniform_() self.bias.data.zero_() def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
FVL2020/2DImage2BMI
AffineChannel2d
false
5144
[ "MIT" ]
1
90783bcb6fce0b91fb5ab70f62f595e3cfff39d0
https://github.com/FVL2020/2DImage2BMI/tree/90783bcb6fce0b91fb5ab70f62f595e3cfff39d0
import torch import torch.nn as nn class Model(nn.Module): """ A simple channel-wise affine transformation operation """ def __init__(self, num_features): super().__init__() self.num_features = num_features self.weight = nn.Parameter(torch.Tensor(num_features)) self.bias = nn.Parameter(torch.Tensor(num_features)) self.weight.data.uniform_() self.bias.data.zero_() def forward(self, x): return x * self.weight.view(1, self.num_features, 1, 1 ) + self.bias.view(1, self.num_features, 1, 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
FactorizationMachine
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5v/c5vqsotjiyyydjenxs3ttprtusysjuigcmpcuavw4a5cbsh4movc.py # Topologically Sorted Source Nodes: [sum_1, square_of_sum, pow_2, sum_of_square, ix, ix_1, mul], Original ATen: [aten.sum, aten.pow, aten.sub, aten.mul] # Source node to ATen node mapping: # ix => sub # ix_1 => sum_3 # mul => mul # pow_2 => pow_2 # square_of_sum => pow_1 # sum_1 => sum_1 # sum_of_square => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [1], True), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + (x2), tmp68, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sum_1, square_of_sum, pow_2, sum_of_square, ix, ix_1, mul], Original ATen: [aten.sum, aten.pow, aten.sub, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class FactorizationMachine(torch.nn.Module): def __init__(self, reduce_sum=True): super().__init__() self.reduce_sum = reduce_sum def forward(self, x): """ :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)`` """ square_of_sum = torch.sum(x, dim=1) ** 2 sum_of_square = torch.sum(x ** 2, dim=1) ix = square_of_sum - sum_of_square if self.reduce_sum: ix = torch.sum(ix, dim=1, keepdim=True) return 0.5 * ix def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
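Editorial note: the square-of-sum minus sum-of-squares step above is the classic factorization-machine identity 2 * sum_{i<j} <x_i, x_j> = <sum_i x_i, sum_i x_i> - sum_i <x_i, x_i>, which turns an O(num_fields^2) pairwise interaction into two O(num_fields) reductions. A small check against the explicit double loop (illustrative only; uses the 3-D shape documented in the docstring):

import torch

x = torch.rand(4, 4, 4)  # (batch_size, num_fields, embed_dim)
fm = FactorizationMachine(reduce_sum=True)
fast = fm(x)

slow = torch.zeros(x.size(0), 1)
for i in range(x.size(1)):
    for j in range(i + 1, x.size(1)):
        slow += (x[:, i] * x[:, j]).sum(dim=1, keepdim=True)
assert torch.allclose(fast, slow, atol=1e-5)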
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + x2, tmp68, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class FactorizationMachineNew(torch.nn.Module): def __init__(self, reduce_sum=True): super().__init__() self.reduce_sum = reduce_sum def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Fanxingye/Autotabular
FactorizationMachine
false
5145
[ "Apache-2.0" ]
1
d630c78290a52f8c73885afb16884e18135c34f6
https://github.com/Fanxingye/Autotabular/tree/d630c78290a52f8c73885afb16884e18135c34f6
import torch import torch.utils.data class Model(torch.nn.Module): def __init__(self, reduce_sum=True): super().__init__() self.reduce_sum = reduce_sum def forward(self, x): """ :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)`` """ square_of_sum = torch.sum(x, dim=1) ** 2 sum_of_square = torch.sum(x ** 2, dim=1) ix = square_of_sum - sum_of_square if self.reduce_sum: ix = torch.sum(ix, dim=1, keepdim=True) return 0.5 * ix def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
AGELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.utils.data import torch.cuda import torch.utils.checkpoint def agelu(x): SQRT_M2_PI = math.sqrt(2 / math.pi) COEFF = 0.044715 return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow( x, 3)))) class AGELU(torch.nn.Module): def forward(self, input): return agelu(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
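Editorial note: agelu above is the standard tanh approximation of GELU, 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))). A quick check against the built-in approximation (assumes PyTorch >= 1.12, where F.gelu gained the approximate argument):

import torch
import torch.nn.functional as F

x = torch.randn(1000)
assert torch.allclose(agelu(x), F.gelu(x, approximate='tanh'), atol=1e-6)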
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data import torch.cuda import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def agelu(x): SQRT_M2_PI = math.sqrt(2 / math.pi) COEFF = 0.044715 return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow( x, 3)))) class AGELUNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Dan-hbd/NMTGMinor
AGELU
false
5,146
[ "MIT" ]
1
84e59ac8391ee78852d7c71afc60c3c8b8e3d44d
https://github.com/Dan-hbd/NMTGMinor/tree/84e59ac8391ee78852d7c71afc60c3c8b8e3d44d
import math import torch import torch.utils.data import torch.cuda import torch.utils.checkpoint def agelu(x): SQRT_M2_PI = math.sqrt(2 / math.pi) COEFF = 0.044715 return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow( x, 3)))) class Model(torch.nn.Module): def forward(self, input): return agelu(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/tz/ctzy7ve4z3i7lqshzeg47cdndeohawhnn4shd5m4djcnph2bosgc.py # Topologically Sorted Source Nodes: [prob, sub_2, pos_weight, mul_2, clamp, abs_1, neg, exp, add, log, sub, clamp_1, mul, pos_log_sig, mul_3, sub_3, neg_weight, mul_4, neg_1, clamp_2, abs_2, neg_2, exp_1, add_2, log_1, sub_1, clamp_3, mul_1, neg_log_sig, mul_5, add_4, loss, mul_6, sub_4, mul_7, avg_weight, mean, loss_1, mean_1], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.clamp, aten.abs, aten.neg, aten.exp, aten.add, aten.log, aten.sub, aten.mean, aten.div] # Source node to ATen node mapping: # abs_1 => abs_1 # abs_2 => abs_2 # add => add # add_2 => add_2 # add_4 => add_4 # avg_weight => add_5 # clamp => clamp_max # clamp_1 => clamp_max_1, clamp_min # clamp_2 => clamp_max_2 # clamp_3 => clamp_max_3, clamp_min_1 # exp => exp # exp_1 => exp_1 # log => log # log_1 => log_1 # loss => neg_3 # loss_1 => div # mean => mean # mean_1 => mean_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # neg => neg # neg_1 => neg_1 # neg_2 => neg_2 # neg_log_sig => add_3 # neg_weight => pow_2 # pos_log_sig => add_1 # pos_weight => pow_1 # prob => sigmoid # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_4 => sub_4 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %pow_1), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%arg0_1, 0), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %add : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max, %log), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %pow_2), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%neg_1, 0), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_2,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max_2, %log_1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_3, 0.5), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %mul_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_5), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_4,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %pow_1), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %pow_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_5,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = 
(%neg_3, %mean), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tl.sigmoid(tmp1) tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp0 * tmp5 tmp7 = 0.0 tmp8 = triton_helpers.minimum(tmp1, tmp7) tmp9 = tl_math.abs(tmp1) tmp10 = -tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tl_math.log(tmp12) tmp14 = tmp8 - tmp13 tmp15 = triton_helpers.maximum(tmp1, tmp7) tmp16 = triton_helpers.minimum(tmp15, tmp7) tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp6 * tmp19 tmp21 = tmp3 - tmp0 tmp22 = tmp2 * tmp2 tmp23 = tmp21 * tmp22 tmp24 = -tmp1 tmp25 = triton_helpers.minimum(tmp24, tmp7) tmp26 = tmp25 - tmp13 tmp27 = tmp26 + tmp18 tmp28 = tmp23 * tmp27 tmp29 = tmp20 + tmp28 tmp30 = -tmp29 tmp31 = tmp6 + tmp23 tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0)) tmp35 = 256.0 tmp36 = tmp34 / tmp35 tmp37 = tmp30 / tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tmp41 = tmp40 / tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp41, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [prob, sub_2, pos_weight, mul_2, clamp, abs_1, neg, exp, add, log, sub, clamp_1, mul, pos_log_sig, mul_3, sub_3, neg_weight, mul_4, neg_1, clamp_2, abs_2, neg_2, exp_1, add_2, log_1, sub_1, clamp_3, mul_1, neg_log_sig, mul_5, add_4, loss, mul_6, sub_4, mul_7, avg_weight, mean, loss_1, mean_1], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.clamp, aten.abs, aten.neg, aten.exp, aten.add, aten.log, aten.sub, aten.mean, aten.div] stream0 = get_raw_stream(0) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def log_minus_sigmoid(x): return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) def log_sigmoid(x): return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) class FocalLoss(nn.Module): def __init__(self, gamma=2): super(FocalLoss, self).__init__() self.gamma = gamma def forward(self, input, target): pos_log_sig = log_sigmoid(input) neg_log_sig = log_minus_sigmoid(input) prob = torch.sigmoid(input) pos_weight = torch.pow(1 - prob, self.gamma) neg_weight = torch.pow(prob, self.gamma) loss = -(target * pos_weight * pos_log_sig + (1 - target) * neg_weight * neg_log_sig) avg_weight = target * pos_weight + (1 - target) * neg_weight loss /= avg_weight.mean() return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.sigmoid(tmp1) tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp0 * tmp5 tmp7 = 0.0 tmp8 = triton_helpers.minimum(tmp1, tmp7) tmp9 = tl_math.abs(tmp1) tmp10 = -tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tl_math.log(tmp12) tmp14 = tmp8 - tmp13 tmp15 = triton_helpers.maximum(tmp1, tmp7) tmp16 = triton_helpers.minimum(tmp15, tmp7) tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp6 * tmp19 tmp21 = tmp3 - tmp0 tmp22 = tmp2 * tmp2 tmp23 = tmp21 * tmp22 tmp24 = -tmp1 tmp25 = triton_helpers.minimum(tmp24, tmp7) tmp26 = tmp25 - tmp13 tmp27 = tmp26 + tmp18 tmp28 = tmp23 * tmp27 tmp29 = tmp20 + tmp28 tmp30 = -tmp29 tmp31 = tmp6 + tmp23 tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0)) tmp35 = 256.0 tmp36 = tmp34 / tmp35 tmp37 = tmp30 / tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tmp41 = tmp40 / tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp41, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0[ grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, def log_minus_sigmoid(x): return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) def log_sigmoid(x): return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) class FocalLossNew(nn.Module): def __init__(self, gamma=2): super(FocalLossNew, self).__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
FadedFate/TrackerSiamRPN
FocalLoss
false
5147
[ "MIT" ]
1
f4156fa4bed9a0ca6c7ac9b653c07e564d8a058d
https://github.com/FadedFate/TrackerSiamRPN/tree/f4156fa4bed9a0ca6c7ac9b653c07e564d8a058d
import torch import torch.nn as nn def log_minus_sigmoid(x): return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) def log_sigmoid(x): return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) class Model(nn.Module): def __init__(self, gamma=2): super().__init__() self.gamma = gamma def forward(self, input, target): pos_log_sig = log_sigmoid(input) neg_log_sig = log_minus_sigmoid(input) prob = torch.sigmoid(input) pos_weight = torch.pow(1 - prob, self.gamma) neg_weight = torch.pow(prob, self.gamma) loss = -(target * pos_weight * pos_log_sig + (1 - target) * neg_weight * neg_log_sig) avg_weight = target * pos_weight + (1 - target) * neg_weight loss /= avg_weight.mean() return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
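The two helpers above are numerically stable rewrites of log sigmoid(x) and log(1 - sigmoid(x)): the clamp plus exp(-|x|) keeps the exponential bounded for large |x|, and the trailing 0.5 * torch.clamp(x, min=0, max=0) term is identically zero (the fused kernel still carries it through as tmp15 through tmp18). A minimal CPU sanity check against PyTorch's reference logsigmoid; a sketch only, with an illustrative sample size and tolerance not taken from the source:

import torch
import torch.nn.functional as F

x = torch.randn(1000) * 10  # include large magnitudes to stress stability
log_sig = torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)))
log_minus_sig = torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)))
assert torch.allclose(log_sig, F.logsigmoid(x), atol=1e-6)         # log sigmoid(x)
assert torch.allclose(log_minus_sig, F.logsigmoid(-x), atol=1e-6)  # log(1 - sigmoid(x))
assert torch.all(0.5 * torch.clamp(x, min=0, max=0) == 0)          # dead term: always zero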
ReLUDropout
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/z2/cz2rxhcm4pthct27emcnkzayyc4jm2tl6sekj3gkbml4p26f24mx.py # Topologically Sorted Source Nodes: [clamp_], Original ATen: [aten.clamp] # Source node to ATen node mapping: # clamp_ => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = 
triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr1 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [clamp_], Original ATen: [aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0) return (arg0_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.cuda import torch.utils.checkpoint def relu_dropout(x, p=0, training=False, variational=False, batch_first=False): if not training or p == 0: return x.clamp_(min=0) p1m = 1 - p if variational: if batch_first: mask = torch.rand_like(x[:, 0, :]) > p1m mask = mask.unsqueeze(1).repeat(1, x.size(1), 1) else: mask = torch.rand_like(x[0]) > p1m mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1) else: mask = torch.rand_like(x) > p1m mask |= x < 0 return x.masked_fill_(mask, 0).div_(p1m) class ReLUDropout(torch.nn.Dropout): def __init__(self, p=0.5, variational=False, batch_first=False, inplace =False): super().__init__(p, inplace=True) self.variational = variational self.batch_first = batch_first def forward(self, input): return relu_dropout(input, p=self.p, training=self.training, variational=self.variational, batch_first=self.batch_first) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.cuda import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) return arg0_1, def relu_dropout(x, p=0, training=False, variational=False, batch_first=False): if not training or p == 0: return x.clamp_(min=0) p1m = 1 - p if variational: if batch_first: mask = torch.rand_like(x[:, 0, :]) > p1m mask = mask.unsqueeze(1).repeat(1, x.size(1), 1) else: mask = torch.rand_like(x[0]) > p1m mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1) else: mask = torch.rand_like(x) > p1m mask |= x < 0 return x.masked_fill_(mask, 0).div_(p1m) class ReLUDropoutNew(torch.nn.Dropout): def __init__(self, p=0.5, variational=False, batch_first=False, inplace =False): super().__init__(p, inplace=True) self.variational = variational self.batch_first = batch_first def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Dan-hbd/NMTGMinor
ReLUDropout
false
5148
[ "MIT" ]
1
84e59ac8391ee78852d7c71afc60c3c8b8e3d44d
https://github.com/Dan-hbd/NMTGMinor/tree/84e59ac8391ee78852d7c71afc60c3c8b8e3d44d
import torch import torch.utils.data import torch.cuda import torch.utils.checkpoint def relu_dropout(x, p=0, training=False, variational=False, batch_first=False): if not training or p == 0: return x.clamp_(min=0) p1m = 1 - p if variational: if batch_first: mask = torch.rand_like(x[:, 0, :]) > p1m mask = mask.unsqueeze(1).repeat(1, x.size(1), 1) else: mask = torch.rand_like(x[0]) > p1m mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1) else: mask = torch.rand_like(x) > p1m mask |= x < 0 return x.masked_fill_(mask, 0).div_(p1m) class Model(torch.nn.Dropout): def __init__(self, p=0.5, variational=False, batch_first=False, inplace =False): super().__init__(p, inplace=True) self.variational = variational self.batch_first = batch_first def forward(self, input): return relu_dropout(input, p=self.p, training=self.training, variational=self.variational, batch_first=self.batch_first) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
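Only the inference path of relu_dropout is captured here (training=False, or p == 0), so the compiled graph reduces to an in-place clamp at zero, i.e. an in-place ReLU; accordingly the kernel's in_ptr0 and out_ptr1 are the same tensor, and call returns its own argument. A small CPU sketch of that path, using the relu_dropout helper defined in this record (shapes are illustrative):

import torch

x = torch.randn(4, 4, 4, 4)
ref = torch.relu(x)                            # computed before the in-place op
out = relu_dropout(x, p=0.5, training=False)   # eval mode: x.clamp_(min=0) only
assert torch.equal(out, ref)
assert out.data_ptr() == x.data_ptr()          # mutation happened in place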
AvgPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/l7/cl7te2znkphosjjgl5c32tkhoiehr4feuczsseen53wz7j4ifolb.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 
(4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn import torch.utils.data class AvgPool(nn.Module): """1-d max pooling module.""" def __init__(self, stride=None, padding=0): super(AvgPool, self).__init__() self.stride = stride self.padding = padding def forward(self, x): kernel_size = x.size(2) x = F.max_pool1d(input=x, kernel_size=kernel_size, stride=self. stride, padding=self.padding) return x.squeeze(dim=-1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class AvgPoolNew(nn.Module): """1-d max pooling module.""" def __init__(self, stride=None, padding=0): super(AvgPoolNew, self).__init__() self.stride = stride self.padding = padding def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
FengZiYjun/fastNLP
AvgPool
false
5149
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch import torch.nn.functional as F from torch import nn import torch.utils.data class Model(nn.Module): """1-d max pooling module.""" def __init__(self, stride=None, padding=0): super().__init__() self.stride = stride self.padding = padding def forward(self, x): kernel_size = x.size(2) x = F.max_pool1d(input=x, kernel_size=kernel_size, stride=self. stride, padding=self.padding) return x.squeeze(dim=-1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return []
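The AvgPool name notwithstanding, the op is a max over the full length dimension, and the kernel makes that explicit: four loads per (batch, channel) row folded with triton_helpers.maximum. A CPU check against torch.max, using the module defined in this record with the same input shape as get_inputs:

import torch

pool = AvgPool()
x = torch.rand(4, 4, 4)            # (batch, channels, length)
out = pool(x)                      # (batch, channels)
assert torch.equal(out, x.max(dim=2).values)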
InnerProductNetwork
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/4j/c4jap2mmmjx3sc7h5t72pmbjcbecx7tmbl4pp62ydyuz7rfnbxw5.py # Topologically Sorted Source Nodes: [getitem, getitem_1, mul, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] # Source node to ATen node mapping: # getitem => index # getitem_1 => index_1 # mul => mul # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) triton_poi_fused_index_mul_sum_0 = async_compile.triton('triton_poi_fused_index_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x0 = xindex % 4 x2 = (xindex // 24) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (x0 + (16*tmp16) + (64*x2)), xmask) tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (x0 + (16*tmp22) + (64*x2)), xmask) tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (4 + x0 + (16*tmp16) + (64*x2)), xmask) tmp26 = tl.load(in_ptr0 + (4 + x0 + (16*tmp22) + (64*x2)), xmask) tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (8 + x0 + (16*tmp16) + (64*x2)), xmask) tmp30 = tl.load(in_ptr0 + (8 + x0 + (16*tmp22) + (64*x2)), xmask) tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (12 + x0 + (16*tmp16) + (64*x2)), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0 + (16*tmp22) + (64*x2)), xmask) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + (x3), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [getitem, getitem_1, mul, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_index_mul_sum_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class InnerProductNetwork(torch.nn.Module): def forward(self, x): """ :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)`` """ num_fields = x.shape[1] row, col = list(), list() for i in range(num_fields - 1): for j in range(i + 1, num_fields): row.append(i), col.append(j) return torch.sum(x[:, row] * x[:, col], dim=2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (x0 + 16 * tmp16 + 64 * x2), xmask) tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (x0 + 16 * tmp22 + 64 * x2), xmask) tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (4 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp26 = tl.load(in_ptr0 + (4 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (8 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp30 = tl.load(in_ptr0 + (8 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (12 + x0 + 16 * tmp16 + 64 * x2), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0 + 16 * tmp22 + 64 * x2), xmask) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + x3, tmp36, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class InnerProductNetworkNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Fanxingye/Autotabular
InnerProductNetwork
false
5150
[ "Apache-2.0" ]
1
d630c78290a52f8c73885afb16884e18135c34f6
https://github.com/Fanxingye/Autotabular/tree/d630c78290a52f8c73885afb16884e18135c34f6
import torch import torch.utils.data class Model(torch.nn.Module): def forward(self, x): """ :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)`` """ num_fields = x.shape[1] row, col = list(), list() for i in range(num_fields - 1): for j in range(i + 1, num_fields): row.append(i), col.append(j) return torch.sum(x[:, row] * x[:, col], dim=2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
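The row/col lists enumerate all num_fields * (num_fields - 1) / 2 field pairs (i, j) with i < j, so four fields yield six pair scores; the kernel bakes both lookup tables into branchless tl.where chains over x1. A CPU check against an explicit pair loop, using the module defined in this record (batch and embedding sizes are illustrative):

import torch

net = InnerProductNetwork()
x = torch.rand(2, 4, 8)            # (batch_size, num_fields, embed_dim)
out = net(x)                       # (2, 6): one inner product per field pair
expected = torch.stack([(x[:, i] * x[:, j]).sum(-1)
                        for i in range(3) for j in range(i + 1, 4)], dim=1)
assert torch.allclose(out, expected)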
RingLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/li/cli63qqsdizise6efdfq5upeadzy7c4nnnl6bxsonf2qrbibqd6n.py # Topologically Sorted Source Nodes: [norm, sub, pow_1, l, mul], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul] # Source node to ATen node mapping: # l => mean # mul => mul # norm => pow_1, pow_2, sum_1 # pow_1 => pow_3 # sub => sub # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %primals_2), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 1.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_4, 2.0), kwargs = {}) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (0)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tmp23 = 1.0 tmp24 = tmp22 * tmp23 tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp24, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [norm, sub, pow_1, l, mul], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0.run(buf3, primals_1, primals_2, buf2, 1, 64, grid=grid(1), stream=stream0) del primals_1 del primals_2 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class RingLoss(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self, weight_ring=1.0): super(RingLoss, self).__init__() self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) self.weight_ring = weight_ring def forward(self, x): l = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return l * self.weight_ring def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tmp23 = 1.0 tmp24 = tmp22 * tmp23 tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3, primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf3, buf2 class RingLossNew(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self, weight_ring=1.0): super(RingLossNew, self).__init__() self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) self.weight_ring = weight_ring def forward(self, input_0): primals_2 = self.radius primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
FEIfei-coder/circle-loss-for-reid
RingLoss
false
5151
[ "MIT" ]
1
fbb3be087a6c390fb7f8c000eebb63aa27179a13
https://github.com/FEIfei-coder/circle-loss-for-reid/tree/fbb3be087a6c390fb7f8c000eebb63aa27179a13
import torch import torch.nn as nn class Model(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self, weight_ring=1.0): super().__init__() self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) self.weight_ring = weight_ring def forward(self, x): l = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return l * self.weight_ring def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
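Besides the scalar loss, the forward kernel stores 2 * (||x|| - R) into a second buffer (tmp16 into buf2), which appears to be the saved tensor AOT autograd precomputes for the backward pass; both the features and the learnable radius therefore receive gradients. A minimal eager sketch on CPU, using the class defined in this record:

import torch

loss_fn = RingLoss(weight_ring=1.0)
x = torch.rand(4, 4, 4, 4, requires_grad=True)
loss = loss_fn(x)       # mean of (||x||_2 along dim 1 - radius) ** 2
loss.backward()
assert loss_fn.radius.grad is not None and x.grad is not None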
LinearBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/67/c67nwtvejry7jovauj4gfwnoxaeorcxngrt3hs2o3y3i4niugvpw.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit] # Source node to ATen node mapping: # x_1 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 0.001), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = (rindex // 4) x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0) + (16*r2)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 0.001 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + (x0), tmp21, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) tl.store(out_ptr1 + (x0), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dy/cdyw65rioafwiakcljlghmqa24g7jnrjibhdgma4b5qyneqwjh77.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # x_2 => relu # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 0.001), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 0.001 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = 0.0 tmp17 = tmp15 <= tmp16 tl.store(out_ptr0 + (x3), tmp15, xmask) tl.store(out_ptr1 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf4 = empty_strided_cuda((1, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_0.run(buf0, buf1, buf2, buf4, 4, 16, grid=grid(4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1.run(buf0, buf1, buf2, primals_4, primals_5, buf5, buf6, 64, grid=grid(64), stream=stream0) del buf2 del primals_5 return (buf5, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(buf4, (4, ), (1, ), 0), buf6, reinterpret_tensor(buf1, (1, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, 
repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
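For reference, the reduction in triton_per_fused__native_batch_norm_legit_0 above can be restated in plain PyTorch. A sketch under the shapes used here (the (4, 4, 4) linear output that BatchNorm1d views as (N, C, L), eps = 0.001); all names are local to this example:

import torch

x = torch.rand(4, 4, 4)                                    # linear output, seen by BatchNorm1d as (N, C, L)
mean = x.mean(dim=(0, 2))                                  # out_ptr0: per-channel mean over the 16 elements
sq_dev = ((x - mean[None, :, None]) ** 2).sum(dim=(0, 2))  # out_ptr1: sum of squared deviations
inv_std = torch.rsqrt(sq_dev / 16.0 + 0.001)               # out_ptr2: rsqrt(biased variance + eps)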
import torch from scipy.stats import truncnorm def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class LinearBlock(torch.nn.Module): def __init__(self, input_size, output_size): super(LinearBlock, self).__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, x): x = self.linear(x) x = self.normalize(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
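A quick sanity check of the initializer defined above (a sketch; the bound holds because truncnorm is clipped to [-2, 2] before scaling by std):

import torch

w = torch.empty(64, 64)
truncated_normal_(w, mean=0.0, std=0.01)
assert w.abs().max().item() <= 0.02  # all samples lie within mean +/- 2*std by construction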
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from scipy.stats import truncnorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 0.001 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0 , in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 0.001 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = 0.0 tmp17 = tmp15 <= tmp16 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf4 = empty_strided_cuda((1, 4, 1), (4, 1, 1), torch.float32) get_raw_stream(0) 
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf0, buf1, buf2, buf4, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1[ grid(64)](buf0, buf1, buf2, primals_4, primals_5, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del primals_5 return buf5, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf4, (4,), (1,), 0 ), buf6, reinterpret_tensor(buf1, (1, 4, 1), (4, 1, 1), 0) def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class LinearBlockNew(torch.nn.Module): def __init__(self, input_size, output_size): super(LinearBlockNew, self).__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, input_0): primals_2 = self.normalize.weight primals_4 = self.normalize.bias primals_1 = self.linear.weight primals_5 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
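A minimal parity check between the eager LinearBlock (defined earlier in this record) and the compiled LinearBlockNew, assuming a CUDA device is available; tolerances are illustrative:

import torch

if torch.cuda.is_available():
    eager = LinearBlock(4, 4).cuda()
    compiled = LinearBlockNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share parameters so both paths agree
    x = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)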
Barchid/snn-fsl
LinearBlock
false
5152
[ "Apache-2.0" ]
1
8adca6b7541d51b4ac4198f00e784e54589b4c9d
https://github.com/Barchid/snn-fsl/tree/8adca6b7541d51b4ac4198f00e784e54589b4c9d
import torch from scipy.stats import truncnorm def truncated_normal_(tensor, mean=0.0, std=1.0): values = truncnorm.rvs(-2, 2, size=tensor.shape) values = mean + std * values tensor.copy_(torch.from_numpy(values)) return tensor def fc_init_(module): if hasattr(module, 'weight') and module.weight is not None: truncated_normal_(module.weight.data, mean=0.0, std=0.01) if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.constant_(module.bias.data, 0.0) return module class Model(torch.nn.Module): def __init__(self, input_size, output_size): super().__init__() self.relu = torch.nn.ReLU() self.normalize = torch.nn.BatchNorm1d(output_size, affine=True, momentum=0.999, eps=0.001, track_running_stats=False) self.linear = torch.nn.Linear(input_size, output_size) fc_init_(self.linear) def forward(self, x): x = self.linear(x) x = self.normalize(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
LNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wq/cwq4v2t2casrvnd4kuih7ltbgx5737ucaocshzecbzd5g64pkqnq.py # Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.clone] # Source node to ATen node mapping: # lnn_out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.abs(tmp0) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = libdevice.log1p(tmp3) tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bm/cbmeb2qgbivgi5ugwn3lxt7jt7xsbz2udthbkyvylpkqrid3yapc.py # Topologically Sorted Source Nodes: [lnn_out, lnn_exp, relu], Original ATen: [aten.clone, aten.expm1, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # lnn_exp => expm1 # lnn_out => clone_1 # relu => relu # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%clone_1,), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%expm1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_clone_expm1_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_clone_expm1_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_expm1_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = libdevice.expm1(tmp0) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = 0.0 tmp5 = tmp3 <= tmp4 tl.store(out_ptr0 + (x2 + (4*y3)), tmp3, xmask & ymask) tl.store(out_ptr1 + (x2 + (4*y3)), tmp5, xmask & ymask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [lnn_out, lnn_exp, relu], Original ATen: [aten.clone, aten.expm1, aten.relu, aten.threshold_backward] triton_poi_fused_clone_expm1_relu_threshold_backward_1.run(buf1, buf2, buf3, 64, 4, grid=grid(64, 4), stream=stream0) return (reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
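What triton_poi_fused_clone_0 above fuses, restated in plain PyTorch (a sketch): the elementwise log-space mapping from the module's forward, combined with the transpose of the last two dimensions that hands the subsequent extern_kernels.mm a contiguous operand:

import torch

x = torch.rand(4, 4, 4, 4)
log_x = torch.log1p(torch.abs(x) + 1e-07)    # tmp1..tmp4 in the kernel
buf0 = log_x.transpose(-1, -2).contiguous()  # the permute + clone from the graph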
import math import torch import torch.nn.functional as F import torch.utils.data class LNN(torch.nn.Module): """A pytorch implementation of LNN layer Input shape. - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``. Arguments - **in_features** : Embedding of feature. - **num_fields**: int.The field size of feature. - **LNN_dim**: int.The number of Logarithmic neuron. - **bias**: bool.Whether or not use bias in LNN. """ def __init__(self, num_fields, embed_dim, LNN_dim, bias=False): super(LNN, self).__init__() self.num_fields = num_fields self.embed_dim = embed_dim self.LNN_dim = LNN_dim self.lnn_output_dim = LNN_dim * embed_dim self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, x): """ :param x: Long tensor of size ``(batch_size, num_fields, embedding_size)`` """ embed_x_abs = torch.abs(x) embed_x_afn = torch.add(embed_x_abs, 1e-07) embed_x_log = torch.log1p(embed_x_afn) lnn_out = torch.matmul(self.weight, embed_x_log) if self.bias is not None: lnn_out += self.bias lnn_exp = torch.expm1(lnn_out) output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_fields': 4, 'embed_dim': 4, 'LNN_dim': 4}]
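The "logarithmic neuron" described in the docstring computes products of powers as sums in log-space. A step-by-step sketch with small arbitrary shapes (all names local to this example):

import torch

x = torch.rand(2, 3, 5)                       # (batch, num_fields, embed_dim)
weight = torch.rand(4, 3)                     # (LNN_dim, num_fields)
log_x = torch.log1p(torch.abs(x) + 1e-07)     # strictly positive inputs, mapped to log-space
lnn_out = torch.matmul(weight, log_x)         # weighted sums of logs == products of powers
lnn_exp = torch.expm1(lnn_out)                # map back out of log-space
out = torch.relu(lnn_exp).reshape(-1, 4 * 5)  # (batch, LNN_dim * embed_dim) == (2, 20)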
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.abs(tmp0) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = libdevice.log1p(tmp3) tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = libdevice.expm1(tmp0) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = 0.0 tmp5 = tmp3 <= tmp4 tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp5, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clone_expm1_relu_threshold_backward_1[grid(64, 4)]( buf1, buf2, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) return reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor( buf0, (64, 4), (4, 1), 0), buf1, buf3 class LNNNew(torch.nn.Module): """A pytorch implementation of LNN layer Input shape. - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``. Arguments - **in_features** : Embedding of feature. - **num_fields**: int.The field size of feature. - **LNN_dim**: int.The number of Logarithmic neuron. - **bias**: bool.Whether or not use bias in LNN. 
""" def __init__(self, num_fields, embed_dim, LNN_dim, bias=False): super(LNNNew, self).__init__() self.num_fields = num_fields self.embed_dim = embed_dim self.LNN_dim = LNN_dim self.lnn_output_dim = LNN_dim * embed_dim self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Fanxingye/Autotabular
LNN
false
5153
[ "Apache-2.0" ]
1
d630c78290a52f8c73885afb16884e18135c34f6
https://github.com/Fanxingye/Autotabular/tree/d630c78290a52f8c73885afb16884e18135c34f6
import math import torch import torch.nn.functional as F import torch.utils.data class Model(torch.nn.Module): """A pytorch implementation of LNN layer Input shape. - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape:``(batch_size,LNN_dim*embedding_size)``. Arguments - **in_features** : Embedding of feature. - **num_fields**: int.The field size of feature. - **LNN_dim**: int.The number of Logarithmic neuron. - **bias**: bool.Whether or not use bias in LNN. """ def __init__(self, num_fields, embed_dim, LNN_dim, bias=False): super().__init__() self.num_fields = num_fields self.embed_dim = embed_dim self.LNN_dim = LNN_dim self.lnn_output_dim = LNN_dim * embed_dim self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, x): """ :param x: Long tensor of size ``(batch_size, num_fields, embedding_size)`` """ embed_x_abs = torch.abs(x) embed_x_afn = torch.add(embed_x_abs, 1e-07) embed_x_log = torch.log1p(embed_x_afn) lnn_out = torch.matmul(self.weight, embed_x_log) if self.bias is not None: lnn_out += self.bias lnn_exp = torch.expm1(lnn_out) output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
FCDiscriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fl/cflskeukqjcpn5pfynzkpwyovblowpewl3lyqiwirncoqacxcylo.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fi/cfik3ekgqfui53hs3oovko4x7tlh4b2wbgnht32gjrarwmhugyng.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = 
tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ed/cedb4xkhtn35e7lnqetrdwsyqvpftp56fehphd4yymgaavex4aka.py # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_4 => convolution_2 # x_5 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7q/c7qkey6xgen6g4in6j4xxwlisrmnbat37t62h6ukhjyafaazep4c.py # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_6 => convolution_3 # x_7 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) 
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ik/cik5pyqifucbhblhkx6wygggufrollappejam67g5gl4ncmmv2wh.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_8 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (512, ), (1, )) assert_size_stride(primals_10, (1, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, 131072, grid=grid(131072), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, 
(4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf5, primals_7, 65536, grid=grid(65536), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf7, primals_9, 32768, grid=grid(32768), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 2, 2), (4, 4, 2, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf9, primals_11, 16, grid=grid(16), stream=stream0) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
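The pattern repeated by kernels 0 through 3 above, restated as a sketch: the convolution itself is dispatched to extern_kernels.convolution, and Inductor fuses only the bias add and LeakyReLU(0.2) into a single in-place pointwise pass:

import torch

y = torch.rand(4, 64, 32, 32)         # conv output, bias not yet added
bias = torch.rand(64)
z = y + bias[None, :, None, None]     # tmp2 in the kernels
out = torch.where(z > 0, z, 0.2 * z)  # tmp3..tmp7: LeakyReLU with negative_slope=0.2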
import torch import torch.nn as nn class FCDiscriminator(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.conv4(x) x = self.leaky_relu(x) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
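Spatial-size bookkeeping for the five stride-2 layers, as a sketch using the standard Conv2d output-size rule; for the 64x64 input this reproduces the buffer shapes asserted in the compiled call:

def conv_out(size, kernel=4, stride=2, pad=1):
    return (size + 2 * pad - kernel) // stride + 1

size = 64
for name in ('conv1', 'conv2', 'conv3', 'conv4', 'classifier'):
    size = conv_out(size)
    print(name, size)  # 32, 16, 8, 4, 2 -> final map is (N, 1, 2, 2)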
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (1, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_11, (1,), (1,)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(65536)](buf5, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_3[grid(32768)](buf7, primals_9, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 2, 2), (4, 4, 2, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(16)](buf9, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7) class FCDiscriminatorNew(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminatorNew, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.classifier.weight primals_11 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
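And the corresponding parity check, assuming CUDA and that the eager FCDiscriminator from this record is importable; tolerances are illustrative:

import torch

if torch.cuda.is_available():
    eager = FCDiscriminator(num_classes=4).cuda()
    compiled = FCDiscriminatorNew(num_classes=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 64, 64, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)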
EvanfanBao/Adversarial_DA_Exp
FCDiscriminator
false
5154
[ "MIT" ]
1
09979742d83fe6fd5de9b9f3aa6aa5fe9a44ea54
https://github.com/EvanfanBao/Adversarial_DA_Exp/tree/09979742d83fe6fd5de9b9f3aa6aa5fe9a44ea54
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, num_classes, ndf=64): super().__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.conv4(x) x = self.leaky_relu(x) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [4]
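The get_inputs/get_init_inputs pairs in these records follow a simple harness convention; a sketch of how a consumer might drive them (the unpacking assumes the positional-list form used in this record):

import torch

model = Model(*get_init_inputs())  # [4] -> positional args for Model.__init__
out = model(*get_inputs())         # one (4, 4, 64, 64) input tensor
print(out.shape)                   # torch.Size([4, 1, 2, 2])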
ConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ft/cftqaeqt35oge5l3bbpv3uhleqvp2lsejqwbjdklod7sy6k66dz2.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/iv/civnax7wqicboqqn4dgl5wdes7sneuhnrvfbndkqy6eukxxukgmy.py # Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # x => relu # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [3, 3], [3, 3], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = (xindex // 20) % 20 x5 = (xindex // 400) x3 = (xindex // 1200) x4 = xindex % 1200 tmp0 = tl.load(in_ptr0 + ((3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (63 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp9 = 
tl.load(in_ptr0 + (64 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (124 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (125 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (126 + (3*x0) + (186*x1) + (3844*x5)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tmp42 = tl.full([1], 0, tl.int32) tmp43 = triton_helpers.maximum(tmp42, tmp16) tmp44 = 0.0 tmp45 = tmp43 <= tmp44 tl.store(out_ptr0 + (x4 + (1280*x3)), tmp41, xmask) tl.store(in_out_ptr0 + (x4 + (1216*x3)), tmp43, xmask) tl.store(out_ptr1 + (x4 + (1280*x3)), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6j/c6jxic53xcwskw4sutgcmasi3quyeifp26ovg4ohwmby6vzwxsqj.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # x => relu # x_1 => view # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu, [-1, 192]), kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + ((1216*(x0 // 1200)) + (x0 % 1200)), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xk/cxka33fsjgumiuk2joc3cs43ddlpebgh572xevw74a3o7munm2c6.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask 
& xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (3, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (3, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (10, 192), (192, 1)) assert_size_stride(primals_5, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 46128, grid=grid(46128), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 3, 20, 20), (1216, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 3, 20, 20), (1280, 400, 20, 1), torch.int8) buf4 = buf2; del buf2 # reuse buf10 = empty_strided_cuda((4, 3, 20, 20), (1280, 400, 20, 1), torch.bool) # Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1.run(buf4, buf1, buf3, buf10, 4800, grid=grid(4800), stream=stream0) buf5 = empty_strided_cuda((25, 192), (192, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf4, buf5, 4800, grid=grid(4800), stream=stream0) del buf4 buf6 = empty_strided_cuda((25, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf5, reinterpret_tensor(primals_4, (192, 10), (1, 192), 0), alpha=1, beta=1, out=buf6) del primals_5 buf9 = empty_strided_cuda((25, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_3.run(buf6, buf9, 25, 10, grid=grid(25), stream=stream0) del buf6 return (buf9, primals_1, primals_3, buf1, buf3, buf5, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((3, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 192), (192, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class ConvNet(nn.Module): def __init__(self): super(ConvNet, self).__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 3)) x = x.view(-1, 192) x = self.fc(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1( in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 % 20 x5 = xindex // 400 x3 = xindex // 1200 x4 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (63 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (64 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (124 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (125 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (126 + 3 * x0 + 186 * x1 + 3844 * x5), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tmp42 = tl.full([1], 0, tl.int32) tmp43 = 
triton_helpers.maximum(tmp42, tmp16) tmp44 = 0.0 tmp45 = tmp43 <= tmp44 tl.store(out_ptr0 + (x4 + 1280 * x3), tmp41, xmask) tl.store(in_out_ptr0 + (x4 + 1216 * x3), tmp43, xmask) tl.store(out_ptr1 + (x4 + 1280 * x3), tmp45, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1216 * (x0 // 1200) + x0 % 1200), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (3, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (10, 192), (192, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(46128)](buf1, primals_2, 46128, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 3, 20, 20), (1216, 400, 20, 1), torch .float32) buf3 = empty_strided_cuda((4, 3, 20, 20), (1280, 400, 20, 1), torch .int8) buf4 = buf2 del buf2 buf10 = empty_strided_cuda((4, 3, 20, 20), (1280, 400, 20, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1[grid (4800)](buf4, buf1, buf3, buf10, 4800, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((25, 192), (192, 1), torch.float32) triton_poi_fused_relu_view_2[grid(4800)](buf4, buf5, 4800, XBLOCK= 128, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((25, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf5, reinterpret_tensor(primals_4, (192, 10), (1, 192), 0), alpha=1, beta=1, out=buf6) del primals_5 buf9 = empty_strided_cuda((25, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_3[grid(25)](buf6, buf9, 25, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf6 return buf9, primals_1, primals_3, buf1, buf3, buf5, buf9, primals_4, buf10 class ConvNetNew(nn.Module): def __init__(self): super(ConvNetNew, self).__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5]) return output[0]
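A minimal smoke test for this record (my own sketch, not part of the dataset), assuming ConvNet and ConvNetNew above are both in scope and a CUDA device is available, since the Triton kernels only run on GPU; it copies the eager weights into the compiled wrapper and compares outputs:

import torch

eager = ConvNet().cuda()
compiled = ConvNetNew().cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter names: conv1.*, fc.*
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    ref = eager(x)       # log-probabilities, shape (25, 10)
    out = compiled(x)    # same graph, executed via call() and the fused Triton kernels
print(torch.allclose(ref, out, atol=1e-5))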
Fanxingye/AutoDL
ConvNet
false
5155
[ "Apache-2.0" ]
1
6f409aefc8b81e5fe47df57b82332c8df427875d
https://github.com/Fanxingye/AutoDL/tree/6f409aefc8b81e5fe47df57b82332c8df427875d
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 3)) x = x.view(-1, 192) x = self.fc(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return []
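One non-obvious detail in this record, derived from the buffer shapes in the compiled code above (buf5 is (25, 192) and buf6 is (25, 10)): x.view(-1, 192) does not preserve the batch size. A short sketch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 64, 64)
y = F.max_pool2d(torch.nn.Conv2d(1, 3, kernel_size=3)(x), 3)
print(y.shape)                # torch.Size([4, 3, 20, 20]) -> 4 * 3 * 20 * 20 = 4800 elements
print(y.view(-1, 192).shape)  # torch.Size([25, 192]); the log_softmax output is (25, 10), not (4, 10)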
TReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/py/cpyzadjwmhrswle2fe2xnqc4ovcx6ej5wf6x5qrio2i2w7v2ppfk.py # Topologically Sorted Source Nodes: [sub, relu, x], Original ATen: [aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # sub => sub # x => add # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_sub_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_sub_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_sub_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp5 + tmp2 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [sub, relu, x], Original ATen: [aten.sub, aten.relu, aten.add, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0.run(primals_2, primals_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class TReLU(nn.Module): def __init__(self): super(TReLU, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, x): x = F.relu(x - self.alpha) + self.alpha return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp5 + tmp2 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0[grid(256)](primals_2 , primals_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_1 del primals_2 return buf0, buf1 class TReLUNew(nn.Module): def __init__(self): super(TReLUNew, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, input_0): primals_1 = self.alpha primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
FightingSrain/ColorRL
TReLU
false
5156
[ "MIT" ]
1
2576304d56c2337e2c1cb8fba93888d984ed701b
https://github.com/FightingSrain/ColorRL/tree/2576304d56c2337e2c1cb8fba93888d984ed701b
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, x): x = F.relu(x - self.alpha) + self.alpha return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
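A property the source never states but which the fused kernel above makes visible (it computes maximum(0, x - alpha) + alpha): TReLU is elementwise max(x, alpha). A quick check:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
alpha = torch.tensor([0.25])        # same shape as TReLU's 1-element parameter
lhs = F.relu(x - alpha) + alpha     # the module's forward pass
rhs = torch.maximum(x, alpha)       # broadcasts the scalar threshold
print(torch.allclose(lhs, rhs))     # True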
ArcBiaffine
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zq/czqnr565ambakn75mxihcdbhn7d27w3t6egxhblwlzsw2njqz7gl.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mv] # Source node to ATen node mapping: # matmul_1 => mul, sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %primals_4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mv_0 = async_compile.triton('triton_poi_fused_mv_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mv_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 
= tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qt/cqty7ixgnj6ymsefadavis3iwxnitomsuz2twh2sgj6lfymhccbj.py # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.add] # Source node to ATen node mapping: # output_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %unsqueeze), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), 
primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mv] stream0 = get_raw_stream(0) triton_poi_fused_mv_0.run(primals_3, primals_4, buf2, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf3, buf2, 64, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class ArcBiaffine(nn.Module): """helper module for Biaffine Dependency Parser predicting arc """ def __init__(self, hidden_size, bias=True): super(ArcBiaffine, self).__init__() self.U = nn.Parameter(torch.Tensor(hidden_size, hidden_size), requires_grad=True) self.has_bias = bias if self.has_bias: self.bias = nn.Parameter(torch.Tensor(hidden_size), requires_grad=True) else: self.register_parameter('bias', None) initial_parameter(self) def forward(self, head, dep): """ :param head arc-head tensor = [batch, length, emb_dim] :param dep arc-dependent tensor = [batch, length, emb_dim] :return output tensor = [batch, length, length] """ output = dep.matmul(self.U) output = output.bmm(head.transpose(-1, -2)) if self.has_bias: output += head.matmul(self.bias).unsqueeze(1) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mv_0[grid(16)](primals_3, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = buf1 del buf1 triton_poi_fused_add_1[grid(64)](buf3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf3, primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0) def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. 
:param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class ArcBiaffineNew(nn.Module): """helper module for Biaffine Dependency Parser predicting arc """ def __init__(self, hidden_size, bias=True): super(ArcBiaffineNew, self).__init__() self.U = nn.Parameter(torch.Tensor(hidden_size, hidden_size), requires_grad=True) self.has_bias = bias if self.has_bias: self.bias = nn.Parameter(torch.Tensor(hidden_size), requires_grad=True) else: self.register_parameter('bias', None) initial_parameter(self) def forward(self, input_0, input_1): primals_2 = self.U primals_4 = self.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
FengZiYjun/fastNLP
ArcBiaffine
false
5157
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class Model(nn.Module): """helper module for Biaffine Dependency Parser predicting arc """ def __init__(self, hidden_size, bias=True): super().__init__() self.U = nn.Parameter(torch.Tensor(hidden_size, hidden_size), requires_grad=True) self.has_bias = bias if self.has_bias: self.bias = nn.Parameter(torch.Tensor(hidden_size), requires_grad=True) else: self.register_parameter('bias', None) initial_parameter(self) def forward(self, head, dep): """ :param head arc-head tensor = [batch, length, emb_dim] :param dep arc-dependent tensor = [batch, length, emb_dim] :return output tensor = [batch, length, length] """ output = dep.matmul(self.U) output = output.bmm(head.transpose(-1, -2)) if self.has_bias: output += head.matmul(self.bias).unsqueeze(1) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
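Restating what the mm/bmm/mv sequence above computes (my own paraphrase, not from the repo): score[b, i, j] = dep[b, i] . U . head[b, j] + bias . head[b, j], which can be cross-checked with einsum:

import torch

B, L, D = 4, 4, 4
head, dep = torch.rand(B, L, D), torch.rand(B, L, D)
U, bias = torch.rand(D, D), torch.rand(D)
out = dep.matmul(U).bmm(head.transpose(-1, -2)) + head.matmul(bias).unsqueeze(1)
ref = torch.einsum('bid,de,bje->bij', dep, U, head) \
    + torch.einsum('bjd,d->bj', head, bias).unsqueeze(1)
print(torch.allclose(out, ref, atol=1e-5))  # True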
MaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/o6/co6ezbyvezbvynrzlatkrzkdv3tvccehgwlzayq3pl7wrc36sbap.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) 
x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn import torch.utils.data class MaxPool(nn.Module): """1-d max-pooling module.""" def __init__(self, stride=None, padding=0, dilation=1): super(MaxPool, self).__init__() self.stride = stride self.padding = padding self.dilation = dilation def forward(self, x): x = torch.transpose(x, 1, 2) kernel_size = x.size(2) x = F.max_pool1d(input=x, kernel_size=kernel_size, stride=self. stride, padding=self.padding, dilation=self.dilation) return x.squeeze(dim=-1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class MaxPoolNew(nn.Module): """1-d max-pooling module.""" def __init__(self, stride=None, padding=0, dilation=1): super(MaxPoolNew, self).__init__() self.stride = stride self.padding = padding self.dilation = dilation def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
FengZiYjun/fastNLP
MaxPool
false
5,158
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch import torch.nn.functional as F from torch import nn import torch.utils.data class Model(nn.Module): """1-d max-pooling module.""" def __init__(self, stride=None, padding=0, dilation=1): super().__init__() self.stride = stride self.padding = padding self.dilation = dilation def forward(self, x): x = torch.transpose(x, 1, 2) kernel_size = x.size(2) x = F.max_pool1d(input=x, kernel_size=kernel_size, stride=self. stride, padding=self.padding, dilation=self.dilation) return x.squeeze(dim=-1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return []
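A minimal sanity-check sketch for this row, assuming the MaxPool and MaxPoolNew classes above share one scope and a CUDA device is available (call() pins cuda:0); the fused kernel is simply a max-reduction over the sequence dimension:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, device='cuda')
    eager = MaxPool()(x)             # transpose -> max_pool1d over the full length -> squeeze
    compiled = MaxPoolNew()(x)       # runs triton_poi_fused_max_pool2d_with_indices_0
    reference = x.max(dim=1).values  # the same reduction written directly
    assert torch.allclose(eager, reference)
    assert torch.allclose(eager, compiled)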
Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/eg/ceg3yqeihvaxoe7s7xrlnayevvum4hx2gnba5t7dkjpxnbbqra3h.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => convolution # x_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) del buf0 buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, 16, grid=grid(16), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class Conv(nn.Module): """ Basic 1-d convolution module. initialize with xavier_uniform """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, activation='relu', initial_method=None): super(Conv, self).__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, groups=groups, bias=bias) activations = {'relu': nn.ReLU(), 'tanh': nn.Tanh()} if activation in activations: self.activation = activations[activation] else: raise Exception('Should choose activation function from: ' + ', '.join([x for x in activations])) initial_parameter(self, initial_method) def forward(self, x): x = torch.transpose(x, 1, 2) x = self.conv(x) x = self.activation(x) x = torch.transpose(x, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(16)](buf2, primals_3, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0 ), buf3 def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. 
:param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class ConvNew(nn.Module): """ Basic 1-d convolution module. initialize with xavier_uniform """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, activation='relu', initial_method=None): super(ConvNew, self).__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, groups=groups, bias=bias) activations = {'relu': nn.ReLU(), 'tanh': nn.Tanh()} if activation in activations: self.activation = activations[activation] else: raise Exception('Should choose activation function from: ' + ', '.join([x for x in activations])) initial_parameter(self, initial_method) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
FengZiYjun/fastNLP
Conv
false
5,159
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) class Model(nn.Module): """ Basic 1-d convolution module. initialize with xavier_uniform """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, activation='relu', initial_method=None): super().__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, groups=groups, bias=bias) activations = {'relu': nn.ReLU(), 'tanh': nn.Tanh()} if activation in activations: self.activation = activations[activation] else: raise Exception('Should choose activation function from: ' + ', '.join([x for x in activations])) initial_parameter(self, initial_method) def forward(self, x): x = torch.transpose(x, 1, 2) x = self.conv(x) x = self.activation(x) x = torch.transpose(x, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
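A short usage sketch, assuming the Conv class above is in scope: with kernel_size equal to the sequence length, the Conv1d collapses the time axis to a single step, so the module maps [batch, seq_len, in_channels] to [batch, 1, out_channels], matching the (4, 1, 4) tensor that call() returns.

import torch

conv = Conv(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4)    # [batch, seq_len, in_channels]
y = conv(x)                # transpose -> Conv1d -> ReLU -> transpose back
assert y.shape == (4, 1, 4)
assert (y >= 0).all()      # ReLU output is non-negative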
DotAtte
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fx/cfxd3l5gpfhuegvipp3nirh7g4kaw2lnmtk3hh2j36lb4doz33wg.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # output_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2h/c2h5d4qaovdjfx6dqfb3zoqistf7oryqridolv5lvyya6cyzxxl7.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._softmax] # Source node 
to ATen node mapping: # output_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1) del arg1_1 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._softmax] 
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4) del arg2_1 del buf3 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn import torch.utils.data def seq_mask(seq_len, max_len): """Create sequence mask. :param seq_len: list or torch.Tensor, the lengths of sequences in a batch. :param max_len: int, the maximum sequence length in a batch. :return mask: torch.LongTensor, [batch_size, max_len] """ if not isinstance(seq_len, torch.Tensor): seq_len = torch.LongTensor(seq_len) seq_len = seq_len.view(-1, 1).long() seq_range = torch.arange(start=0, end=max_len, dtype=torch.long, device =seq_len.device).view(1, -1) return torch.gt(seq_len, seq_range) class DotAtte(nn.Module): def __init__(self, key_size, value_size): super(DotAtte, self).__init__() self.key_size = key_size self.value_size = value_size self.scale = math.sqrt(key_size) def forward(self, Q, K, V, seq_mask=None): """ :param Q: [batch, seq_len, key_size] :param K: [batch, seq_len, key_size] :param V: [batch, seq_len, value_size] :param seq_mask: [batch, seq_len] """ output = torch.matmul(Q, K.transpose(1, 2)) / self.scale if seq_mask is not None: output.masked_fill_(seq_mask.lt(1), -float('inf')) output = nn.functional.softmax(output, dim=2) return torch.matmul(output, V) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'key_size': 4, 'value_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out =buf1) del arg1_1 buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = 
reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def seq_mask(seq_len, max_len): """Create sequence mask. :param seq_len: list or torch.Tensor, the lengths of sequences in a batch. :param max_len: int, the maximum sequence length in a batch. :return mask: torch.LongTensor, [batch_size, max_len] """ if not isinstance(seq_len, torch.Tensor): seq_len = torch.LongTensor(seq_len) seq_len = seq_len.view(-1, 1).long() seq_range = torch.arange(start=0, end=max_len, dtype=torch.long, device =seq_len.device).view(1, -1) return torch.gt(seq_len, seq_range) class DotAtteNew(nn.Module): def __init__(self, key_size, value_size): super(DotAtteNew, self).__init__() self.key_size = key_size self.value_size = value_size self.scale = math.sqrt(key_size) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
FengZiYjun/fastNLP
DotAtte
false
5,160
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import math import torch from torch import nn import torch.utils.data def seq_mask(seq_len, max_len): """Create sequence mask. :param seq_len: list or torch.Tensor, the lengths of sequences in a batch. :param max_len: int, the maximum sequence length in a batch. :return mask: torch.LongTensor, [batch_size, max_len] """ if not isinstance(seq_len, torch.Tensor): seq_len = torch.LongTensor(seq_len) seq_len = seq_len.view(-1, 1).long() seq_range = torch.arange(start=0, end=max_len, dtype=torch.long, device =seq_len.device).view(1, -1) return torch.gt(seq_len, seq_range) class Model(nn.Module): def __init__(self, key_size, value_size): super().__init__() self.key_size = key_size self.value_size = value_size self.scale = math.sqrt(key_size) def forward(self, Q, K, V, seq_mask=None): """ :param Q: [batch, seq_len, key_size] :param K: [batch, seq_len, key_size] :param V: [batch, seq_len, value_size] :param seq_mask: [batch, seq_len] """ output = torch.matmul(Q, K.transpose(1, 2)) / self.scale if seq_mask is not None: output.masked_fill_(seq_mask.lt(1), -float('inf')) output = nn.functional.softmax(output, dim=2) return torch.matmul(output, V) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
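A self-contained eager restatement of what the fused kernels compute on the 4-D sample inputs; note the softmax runs over dim=2 rather than the last dim, which is why triton_poi_fused__softmax_1/_2 stride through memory in steps of 4:

import math
import torch

Q, K, V = (torch.rand(4, 4, 4, 4) for _ in range(3))
# scale = sqrt(key_size) = 2.0; the kernel folds this into exp((x - max) * 0.5)
scores = torch.matmul(Q, K.transpose(1, 2)) / math.sqrt(4)
attn = torch.softmax(scores, dim=2)
out = torch.matmul(attn, V)
assert out.shape == (4, 4, 4, 4)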
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/h6/ch6lbegsbikmeuedhuqntueffmboz2ejtnlwrp3sosxr5gscbhtj.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # norm => add # out => mul # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init


class L2Norm(nn.Module):

    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_channels': 4, 'scale': 1.0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tmp14 = 1e-10
    tmp15 = tmp13 + tmp14
    tmp16 = tmp1 / tmp15
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
            primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf0, primals_1


class L2NormNew(nn.Module):

    def __init__(self, n_channels, scale):
        super(L2NormNew, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
Feywell/association_lstm_implement
L2Norm
false
5,161
[ "MIT" ]
1
4e439bd934dc865aad0015a897980a8f124602af
https://github.com/Feywell/association_lstm_implement/tree/4e439bd934dc865aad0015a897980a8f124602af
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init


class Model(nn.Module):

    def __init__(self, n_channels, scale):
        super().__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 1.0]
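A hedged reference for this row: L2Norm divides by the channel-wise (dim=1) L2 norm and rescales by the learned per-channel weight, which the single fused kernel computes in one pass; torch.nn.functional.normalize agrees up to eps handling (normalize clamps the norm, this module adds eps to it):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
weight = torch.ones(4)
out = weight.view(1, -1, 1, 1) * (x / (x.pow(2).sum(dim=1, keepdim=True).sqrt() + 1e-10))
ref = F.normalize(x, p=2, dim=1)   # with weight == 1 these match to ~1e-10
assert torch.allclose(out, ref, atol=1e-5)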
LabelBilinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %primals_3], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fk/cfkqxc7jzg3ikhwzpks3pgmwlo7dmomyw6g4nwkukqjsohbsldpw.py # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add] # Source node to ATen node mapping: # output => add # output_1 => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %view_4), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [output], Original ATen: [aten._trilinear] buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), primals_1, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = 
empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_4, primals_3, buf2, 128, grid=grid(128), stream=stream0) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf3) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf4, primals_2, buf3, 64, grid=grid(64), stream=stream0) del buf3 del primals_2 return (buf4, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 8), (8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
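As a reading aid for the wrapper above: the aten._trilinear call realizes the nn.Bilinear term, the fused cat kernel plus the extern mm realize the linear layer over the concatenated inputs, and the final fused kernel adds the bilinear bias and the two partial results. A minimal eager-mode sketch of the same computation (my reconstruction, not part of the generated file):

import torch
import torch.nn.functional as F


def label_bilinear_reference(x1, x2, W_bi, b_bi, W_lin):
    # x1: (batch, length, in1), x2: (batch, length, in2)
    # W_bi: (num_label, in1, in2), b_bi: (num_label,), W_lin: (num_label, in1 + in2)
    out = F.bilinear(x1, x2, W_bi, b_bi)                 # the _trilinear + bias path
    out = out + torch.cat([x1, x2], dim=2) @ W_lin.t()   # the cat + mm path
    return out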
import torch from torch import nn import torch.utils.data class LabelBilinear(nn.Module): """helper module for Biaffine Dependency Parser predicting label """ def __init__(self, in1_features, in2_features, num_label, bias=True): super(LabelBilinear, self).__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.lin = nn.Linear(in1_features + in2_features, num_label, bias=False ) def forward(self, x1, x2): output = self.bilinear(x1, x2) output += self.lin(torch.cat([x1, x2], dim=2)) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in1_features': 4, 'in2_features': 4, 'num_label': 4}]
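A short usage sketch for the module above (shapes taken from get_inputs/get_init_inputs):

model = LabelBilinear(in1_features=4, in2_features=4, num_label=4)
x1, x2 = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
scores = model(x1, x2)
print(scores.shape)  # torch.Size([4, 4, 4]) -> (batch, length, num_label)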
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_4, (16, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_3, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_4, primals_3, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf3) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(64)](buf4, primals_2, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf3 del primals_2 return buf4, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (16, 8), (8, 1), 0) class LabelBilinearNew(nn.Module): """helper module for Biaffine Dependency Parser predicting label """ def __init__(self, in1_features, in2_features, num_label, bias=True): super(LabelBilinearNew, self).__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.lin = nn.Linear(in1_features + in2_features, num_label, bias=False ) def forward(self, input_0, input_1): primals_1 = self.bilinear.weight primals_2 = self.bilinear.bias primals_5 = self.lin.weight primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5]) return output[0]
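To sanity-check the compiled class against the eager one, a sketch like the following should work; hedged, since it assumes a CUDA device, that both classes are importable side by side, and that the primals ordering in the generated forward matches the eager argument order:

ref = LabelBilinear(4, 4, 4).cuda()
opt = LabelBilinearNew(4, 4, 4).cuda()
opt.load_state_dict(ref.state_dict())
x1 = torch.rand(4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x1, x2), ref(x1, x2))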
FengZiYjun/fastNLP
LabelBilinear
false
5,162
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch from torch import nn import torch.utils.data class Model(nn.Module): """helper module for Biaffine Dependency Parser predicting label """ def __init__(self, in1_features, in2_features, num_label, bias=True): super().__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.lin = nn.Linear(in1_features + in2_features, num_label, bias=False ) def forward(self, x1, x2): output = self.bilinear(x1, x2) output += self.lin(torch.cat([x1, x2], dim=2)) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
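For reference, the nn.Bilinear term in the forward above can be written as an explicit einsum, which makes the contraction easy to check dimensionally (a hedged sketch using the toy sizes from get_init_inputs):

m = Model(in1_features=4, in2_features=4, num_label=4)
x1, x2 = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
# out[b, t, o] = sum_{i, j} x1[b, t, i] * W[o, i, j] * x2[b, t, j] + bias[o]
bi = torch.einsum('bti,oij,btj->bto', x1, m.bilinear.weight, x2) + m.bilinear.bias
torch.testing.assert_close(bi, m.bilinear(x1, x2))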
BiAffine
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wm/cwm4p6i4onfgzirdknbkkkkalyi643y6ynzifzggmplzobgfpkd4.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/gb/cgbvhbwg6zjbgpseylzouy4kgopkhyzopan7swcnpk23a2o45zr5.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # output_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/oz/cozxojbwhgofvogqiimshn6ijmzvg2zn4rlyuttbab5raacnhzht.py # Topologically Sorted Source Nodes: [add, add_1, output_2], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # add_1 => add_1 # output_2 => add_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %unsqueeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_6), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x2 + (4*x1) + (16*x3)), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_5, buf3, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_2, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [add, add_1, output_2], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf7, buf0, buf1, primals_6, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_6 return (buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
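To make the buffer plumbing in call easier to follow, here is a hedged eager transcription of its graph (function and variable names are mine): buf0 and buf1 are the W_d and W_e projections, the two bmm calls realize input_d @ U @ input_e^T per label, and the final fused kernel sums the three score components with the bias.

import torch


def biaffine_call_reference(input_d, input_e, W_d, W_e, U, b):
    # input_d, input_e: (batch, length, dim); W_d, W_e: (num_label, dim)
    # U: (num_label, dim, dim); b: (num_label, 1, 1)
    out_d = (W_d @ input_d.transpose(1, 2)).unsqueeze(3)  # (batch, num_label, len_d, 1)
    out_e = (W_e @ input_e.transpose(1, 2)).unsqueeze(2)  # (batch, num_label, 1, len_e)
    out = input_d.unsqueeze(1) @ U @ input_e.unsqueeze(1).transpose(2, 3)
    return out + out_d + out_e + b                        # (batch, num_label, len_d, len_e)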
import torch
from torch import nn
import torch.utils.data
from torch.nn import Parameter


class BiAffine(nn.Module):

    def __init__(self, n_enc, n_dec, n_labels, biaffine=True, **kwargs):
        """
        Args:
            n_enc: int
                the dimension of the encoder input.
            n_dec: int
                the dimension of the decoder input.
            n_labels: int
                the number of labels of the crf layer
            biaffine: bool
                whether to apply the bi-affine parameter.
            **kwargs:
        """
        super(BiAffine, self).__init__()
        self.n_enc = n_enc
        self.n_dec = n_dec
        self.num_labels = n_labels
        self.biaffine = biaffine
        self.W_d = Parameter(torch.Tensor(self.num_labels, self.n_dec))
        self.W_e = Parameter(torch.Tensor(self.num_labels, self.n_enc))
        self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
        if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.n_dec,
                self.n_enc))
        else:
            self.register_parameter('U', None)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.W_d)
        nn.init.xavier_uniform_(self.W_e)
        nn.init.constant_(self.b, 0.0)
        if self.biaffine:
            nn.init.xavier_uniform_(self.U)

    def forward(self, input_d, input_e, mask_d=None, mask_e=None):
        """
        Args:
            input_d: Tensor
                the decoder input tensor with shape = [batch, length_decoder, input_size]
            input_e: Tensor
                the child input tensor with shape = [batch, length_encoder, input_size]
            mask_d: Tensor or None
                the mask tensor for decoder with shape = [batch, length_decoder]
            mask_e: Tensor or None
                the mask tensor for encoder with shape = [batch, length_encoder]

        Returns: Tensor
            the energy tensor with shape = [batch, num_label, length, length]
        """
        assert input_d.size(0) == input_e.size(0
            ), 'batch sizes of encoder and decoder are required to be equal.'
        _batch, _length_decoder, _ = input_d.size()
        _, _length_encoder, _ = input_e.size()
        out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
        out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
        if self.biaffine:
            output = torch.matmul(input_d.unsqueeze(1), self.U)
            output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
            output = output + out_d + out_e + self.b
        else:
            output = out_d + out_e + self.b
        if mask_d is not None:
            output = output * mask_d.unsqueeze(1).unsqueeze(3
                ) * mask_e.unsqueeze(1).unsqueeze(2)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_enc': 4, 'n_dec': 4, 'n_labels': 4}]
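A usage sketch for the module above, including the optional masks from the docstring (the all-ones masks stand in for a batch with no padding):

model = BiAffine(n_enc=4, n_dec=4, n_labels=4)
dec, enc = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
mask = torch.ones(4, 4)
energy = model(dec, enc, mask_d=mask, mask_e=mask)
print(energy.shape)  # torch.Size([4, 4, 4, 4]) -> [batch, num_label, len_dec, len_enc]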
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_5, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = 
empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = buf3 del buf3 triton_poi_fused_clone_2[grid(256)](primals_2, buf5, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_add_3[grid(256)](buf7, buf0, buf1, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_6 return buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0) class BiAffineNew(nn.Module): def __init__(self, n_enc, n_dec, n_labels, biaffine=True, **kwargs): """ Args: n_enc: int the dimension of the encoder input. n_dec: int the dimension of the decoder input. n_labels: int the number of labels of the crf layer biaffine: bool whether to apply the bi-affine parameter. **kwargs: """ super(BiAffineNew, self).__init__() self.n_enc = n_enc self.n_dec = n_dec self.num_labels = n_labels self.biaffine = biaffine self.W_d = Parameter(torch.Tensor(self.num_labels, self.n_dec)) self.W_e = Parameter(torch.Tensor(self.num_labels, self.n_enc)) self.b = Parameter(torch.Tensor(self.num_labels, 1, 1)) if self.biaffine: self.U = Parameter(torch.Tensor(self.num_labels, self.n_dec, self.n_enc)) else: self.register_parameter('U', None) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.W_d) nn.init.xavier_uniform_(self.W_e) nn.init.constant_(self.b, 0.0) if self.biaffine: nn.init.xavier_uniform_(self.U) def forward(self, input_0, input_1): primals_3 = self.W_d primals_4 = self.W_e primals_6 = self.b primals_1 = self.U primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
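One detail worth noting in call above: reinterpret_tensor produces stride-level views rather than copies, so the extra tensors in the return tuple are zero-cost aliases kept for the backward pass. In plain PyTorch the same trick is torch.as_strided; a small illustration:

x = torch.arange(16.0).reshape(4, 4)
view = torch.as_strided(x, size=(16,), stride=(1,))  # flatten without copying
view[0] = -1.0
print(x[0, 0])  # tensor(-1.), since the view aliases x's storage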
FengZiYjun/fastNLP
BiAffine
false
5,163
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch
from torch import nn
import torch.utils.data
from torch.nn import Parameter


class Model(nn.Module):

    def __init__(self, n_enc, n_dec, n_labels, biaffine=True, **kwargs):
        """
        Args:
            n_enc: int
                the dimension of the encoder input.
            n_dec: int
                the dimension of the decoder input.
            n_labels: int
                the number of labels of the crf layer
            biaffine: bool
                whether to apply the bi-affine parameter.
            **kwargs:
        """
        super().__init__()
        self.n_enc = n_enc
        self.n_dec = n_dec
        self.num_labels = n_labels
        self.biaffine = biaffine
        self.W_d = Parameter(torch.Tensor(self.num_labels, self.n_dec))
        self.W_e = Parameter(torch.Tensor(self.num_labels, self.n_enc))
        self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
        if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.n_dec,
                self.n_enc))
        else:
            self.register_parameter('U', None)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.W_d)
        nn.init.xavier_uniform_(self.W_e)
        nn.init.constant_(self.b, 0.0)
        if self.biaffine:
            nn.init.xavier_uniform_(self.U)

    def forward(self, input_d, input_e, mask_d=None, mask_e=None):
        """
        Args:
            input_d: Tensor
                the decoder input tensor with shape = [batch, length_decoder, input_size]
            input_e: Tensor
                the child input tensor with shape = [batch, length_encoder, input_size]
            mask_d: Tensor or None
                the mask tensor for decoder with shape = [batch, length_decoder]
            mask_e: Tensor or None
                the mask tensor for encoder with shape = [batch, length_encoder]

        Returns: Tensor
            the energy tensor with shape = [batch, num_label, length, length]
        """
        assert input_d.size(0) == input_e.size(0
            ), 'batch sizes of encoder and decoder are required to be equal.'
        _batch, _length_decoder, _ = input_d.size()
        _, _length_encoder, _ = input_e.size()
        out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
        out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
        if self.biaffine:
            output = torch.matmul(input_d.unsqueeze(1), self.U)
            output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
            output = output + out_d + out_e + self.b
        else:
            output = out_d + out_e + self.b
        if mask_d is not None:
            output = output * mask_d.unsqueeze(1).unsqueeze(3
                ) * mask_e.unsqueeze(1).unsqueeze(2)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4, 4, 4]
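The biaffine branch of the forward above collapses to a single einsum; a hedged equivalence sketch using the parameter shapes defined in __init__:

m = Model(n_enc=4, n_dec=4, n_labels=4)
dec, enc = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
# score[b, l, i, j] = dec[b, i, :] @ U[l] @ enc[b, j, :]
biaffine = torch.einsum('bid,lde,bje->blij', dec, m.U, enc)
reference = torch.matmul(torch.matmul(dec.unsqueeze(1), m.U),
    enc.unsqueeze(1).transpose(2, 3))
torch.testing.assert_close(biaffine, reference)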
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/uo/cuol6hlcyvtncabpqu2fbv4v6fv2hshuxi3jnfznm5j2ht34q2kw.py # Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # exp => exp # exp_1 => exp_1 # exp_2 => exp_3 # invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3 # log => log # loss => add_2 # loss_1 => mul_4 # max_val => clamp_min # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # neg => neg # neg_1 => neg_1 # neg_2 => neg_2 # neg_3 => neg_3 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 2.0), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1.0), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {}) # %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 2), 
kwargs = {}) # %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mul), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %clamp_min), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %log), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %add_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {}) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0 = async_compile.triton('triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(in_out_ptr0, 
in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = -tmp0 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl_math.abs(tmp7) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tl_math.exp(tmp15) tmp17 = tmp0 * tmp2 tmp18 = tmp0 - tmp17 tmp19 = triton_helpers.maximum(tmp1, tmp8) tmp20 = tmp18 + tmp19 tmp21 = -tmp19 tmp22 = tl_math.exp(tmp21) tmp23 = tmp1 - tmp19 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tl_math.log(tmp25) tmp27 = tmp20 + tmp26 tmp28 = tmp16 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp31 / tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class FocalLoss(nn.Module): """ from https://github.com/CellProfiling/HPA-competition-solutions/blob/master/bestfitting/src/layers/loss.py """ def __init__(self, gamma=2): super().__init__() self.gamma = gamma def forward(self, logit, target): max_val = (-logit).clamp(min=0) loss = logit - logit * target + max_val + ((-max_val).exp() + (- logit - max_val).exp()).log() invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0)) loss = (invprobs * self.gamma).exp() * loss if len(loss.size()) == 2: loss = loss.sum(dim=1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
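Two identities make the forward above easier to read (my restatement, not from the source). First, the loss line is the numerically stable binary cross-entropy with logits, i.e. the max(-z, 0) trick rearranged into log-sum-exp form. Second, exp(gamma * logsigmoid(-z * (2t - 1))) equals (1 - p_t)**gamma for binary targets, the standard focal weight. A sketch that checks both:

import torch
import torch.nn.functional as F

z = torch.randn(8)                      # logits
t = torch.randint(0, 2, (8,)).float()   # binary targets
# 1) the stable-BCE identity
max_val = (-z).clamp(min=0)
bce = z - z * t + max_val + ((-max_val).exp() + (-z - max_val).exp()).log()
torch.testing.assert_close(bce,
    F.binary_cross_entropy_with_logits(z, t, reduction='none'))
# 2) the focal-weight identity for gamma = 2
weight = (F.logsigmoid(-z * (2 * t - 1)) * 2).exp()
p_t = torch.sigmoid(z) * t + (1 - torch.sigmoid(z)) * (1 - t)
torch.testing.assert_close(weight, (1 - p_t) ** 2)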
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = -tmp0 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl_math.abs(tmp7) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tl_math.exp(tmp15) tmp17 = tmp0 * tmp2 tmp18 = tmp0 - tmp17 tmp19 = triton_helpers.maximum(tmp1, tmp8) tmp20 = tmp18 + tmp19 tmp21 = -tmp19 tmp22 = tl_math.exp(tmp21) tmp23 = tmp1 - tmp19 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tl_math.log(tmp25) tmp27 = tmp20 + tmp26 tmp28 = tmp16 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp31 / tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[ grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class FocalLossNew(nn.Module): """ from https://github.com/CellProfiling/HPA-competition-solutions/blob/master/bestfitting/src/layers/loss.py """ def __init__(self, gamma=2): super().__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
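The kernel above is a persistent reduction: a single program loads all 256 elements (RBLOCK spans the whole 4x4x4x4 input), evaluates the fused loss elementwise, and divides the block sum by 256.0 to produce the mean in one pass; note that gamma=2 is baked into the kernel constants. A hedged sanity check of the compiled entry point against the eager module (assumes a CUDA device and that both classes are importable together):

logit = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
eager = FocalLoss(gamma=2)(logit, target)
compiled = call([logit.clone(), target.clone()])[0]  # call() clears its argument list
torch.testing.assert_close(compiled, eager)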
Fkaneko/kaggle-hpa-single-cell-image-classification
FocalLoss
false
5,164
[ "MIT" ]
1
52000cbf5c7eec6ace29274d9e85b5b24fac281b
https://github.com/Fkaneko/kaggle-hpa-single-cell-image-classification/tree/52000cbf5c7eec6ace29274d9e85b5b24fac281b
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): """ from https://github.com/CellProfiling/HPA-competition-solutions/blob/master/bestfitting/src/layers/loss.py """ def __init__(self, gamma=2): super().__init__() self.gamma = gamma def forward(self, logit, target): max_val = (-logit).clamp(min=0) loss = logit - logit * target + max_val + ((-max_val).exp() + (- logit - max_val).exp()).log() invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0)) loss = (invprobs * self.gamma).exp() * loss if len(loss.size()) == 2: loss = loss.sum(dim=1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
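A usage sketch; get_inputs above feeds uniform floats as targets, but in typical use the targets are {0, 1} labels (an assumption about intended use):

criterion = Model(gamma=2)
logits = torch.randn(4, 10, requires_grad=True)
targets = torch.randint(0, 2, (4, 10)).float()
loss = criterion(logits, targets)  # 2-D loss is summed over dim 1, then averaged
loss.backward()
print(loss.item(), logits.grad.shape)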
ConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, 
xmask) tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl 
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xc/cxc6b6yaoqrxygbhhvqslfh3evd2idz6ndtwi246ntlpvok4xlz7.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # out_3 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 1000 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (6, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 4, 32, 32), (4096, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (1000, 400), (400, 1)) assert_size_stride(primals_7, (1000, ), (1, )) assert_size_stride(primals_8, (10, 1000), (1000, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) buf8 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 1000), (1, 400), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 4000, grid=grid(4000), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (1000, 10), (1, 1000), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 32, 32), (4096, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1000, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 1000), (1000, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class ConvNet(nn.Module):

    def __init__(self, NumChannels):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(NumChannels, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 32, 32])]


def get_init_inputs():
    return [[], {'NumChannels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 18816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 6
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 4704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x3 = xindex // 14
    x2 = xindex // 1176
    x4 = xindex % 1176
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1000
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (6, 4, 5, 5), (100, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 4, 32, 32), (4096, 1024, 32, 1))
    assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (1000, 400), (400, 1))
    assert_size_stride(primals_7, (1000,), (1,))
    assert_size_stride(primals_8, (10, 1000), (1000, 1))
    assert_size_stride(primals_9, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
            18816, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
            buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
            6400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
            buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
            reinterpret_tensor(primals_6, (400, 1000), (1, 400), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(4000)](buf9, primals_7, 4000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
            (1000, 10), (1, 1000), 0), alpha=1, beta=1, out=buf10)
        del primals_9
    return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
        buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9,
        primals_8, primals_6)


class ConvNetNew(nn.Module):

    def __init__(self, NumChannels):
        super(ConvNetNew, self).__init__()
        self.conv1 = nn.Conv2d(NumChannels, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
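The call() wrapper above takes its nine primals in the order asserted at the top of the function: conv1 weight and bias, the input, conv2 weight and bias, then the two linear layers' weights and biases. A minimal sketch of driving it directly (not part of the record; it assumes a CUDA device and that the ConvNet and call definitions above are in one scope):

import torch

net = ConvNet(4).cuda()
x = torch.rand(4, 4, 32, 32, device='cuda')
args = [net.conv1.weight, net.conv1.bias, x, net.conv2.weight,
    net.conv2.bias, net.fc1.weight, net.fc1.bias, net.fc2.weight,
    net.fc2.bias]
with torch.no_grad():
    out = call(args)[0]  # call() empties the argument list it is handed
    ref = net(x)
print(torch.allclose(out, ref, atol=1e-5))  # expected: True up to fp32 rounding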
FedericoZocco/VarMemLBFGS-PyTorch
ConvNet
false
5,165
[ "MIT" ]
1
5a0ed7b95fc71c9a421a07071f8d5199cf6a6216
https://github.com/FedericoZocco/VarMemLBFGS-PyTorch/tree/5a0ed7b95fc71c9a421a07071f8d5199cf6a6216
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class Model(nn.Module):

    def __init__(self, NumChannels):
        super().__init__()
        self.conv1 = nn.Conv2d(NumChannels, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 32, 32])]


def get_init_inputs():
    return [4]
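Since ConvNetNew registers the same submodule names as the eager ConvNet, their state_dicts are interchangeable, which makes a numerical parity check straightforward. A sketch (not part of the dataset row; assumes a CUDA device and both class definitions above in scope):

import torch

torch.manual_seed(0)
eager = ConvNet(4).cuda().eval()
compiled = ConvNetNew(4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # parameter names line up 1:1
x = torch.rand(4, 4, 32, 32, device='cuda')
with torch.no_grad():
    diff = (eager(x) - compiled(x)).abs().max()
print(diff)  # expected to be fp32 rounding noise, on the order of 1e-6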
BCELoss2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jl/cjlfb7i5sda6iuixuufqhuq6ofisyq3co2j2rvs4i66aznhxymc2.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.binary_cross_entropy] # Source node to ATen node mapping: # loss => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_binary_cross_entropy_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.binary_cross_entropy] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class BCELoss2d(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2d, self).__init__()
        # size_average is a deprecated nn.BCELoss argument; kept as in the source repo
        self.criterion = nn.BCELoss(weight, size_average)

    def forward(self, inputs, targets):
        probs = F.sigmoid(inputs)  # F.sigmoid is deprecated; torch.sigmoid is equivalent
        probs_flat = probs.view(-1)
        targets_flat = targets.view(-1)
        loss = self.criterion(probs_flat, targets_flat)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = -tmp4
    tmp6 = libdevice.log1p(tmp5)
    tmp7 = -100.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp2 * tmp8
    tmp10 = tl_math.log(tmp4)
    tmp11 = triton_helpers.maximum(tmp10, tmp7)
    tmp12 = tmp0 * tmp11
    tmp13 = tmp9 - tmp12
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = 256.0
    tmp18 = tmp16 / tmp17
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class BCELoss2dNew(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2dNew, self).__init__()
        self.criterion = nn.BCELoss(weight, size_average)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
ForrestPi/SegDL
BCELoss2d
false
5,166
[ "MIT" ]
1
56f2ff229dfa7540704d6de50292c724693aac75
https://github.com/ForrestPi/SegDL/tree/56f2ff229dfa7540704d6de50292c724693aac75
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super().__init__()
        # size_average is a deprecated nn.BCELoss argument; kept as in the source repo
        self.criterion = nn.BCELoss(weight, size_average)

    def forward(self, inputs, targets):
        probs = F.sigmoid(inputs)  # F.sigmoid is deprecated; torch.sigmoid is equivalent
        probs_flat = probs.view(-1)
        targets_flat = targets.view(-1)
        loss = self.criterion(probs_flat, targets_flat)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
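The fused reduction kernel above folds the sigmoid into the loss and clamps both log terms at -100 (the tmp7 = -100.0 constant), which matches ATen's binary_cross_entropy. A CPU sketch of the same arithmetic, for illustration only, using stock PyTorch:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4)  # logits
t = torch.rand(4, 4, 4, 4)  # soft targets
p = torch.sigmoid(x)
# (t - 1) * clamp(log1p(-p), min=-100) - t * clamp(log(p), min=-100), averaged
manual = ((t - 1) * torch.clamp(torch.log1p(-p), min=-100.0)
    - t * torch.clamp(torch.log(p), min=-100.0)).mean()
ref = F.binary_cross_entropy(p.view(-1), t.view(-1))
print(torch.allclose(manual, ref, atol=1e-6))  # expected: True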
T5LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sk/csk6e2ijxwozqfg5yafv56tovxgvs23ahvpatwnswiausxio766y.py # Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, hidden_states, mul_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul] # Source node to ATen node mapping: # add => add # hidden_states => mul # mul_1 => mul_1 # pow_1 => pow_1 # rsqrt => rsqrt # variance => mean # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %mul), kwargs = {}) triton_poi_fused_add_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': 
False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = libdevice.rsqrt(tmp16) tmp18 = tmp1 * tmp17 tmp19 = tmp0 * tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, hidden_states, mul_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_pow_rsqrt_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.checkpoint


class T5LayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        Construct a layernorm module in the T5 style.
        No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        if self.weight.dtype == torch.float16:
            hidden_states = hidden_states  # no-op branch, kept verbatim from the source repo
        return self.weight * hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = 4.0
    tmp14 = tmp12 / tmp13
    tmp15 = 1e-06
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.rsqrt(tmp16)
    tmp18 = tmp1 * tmp17
    tmp19 = tmp0 * tmp18
    tl.store(out_ptr0 + x2, tmp19, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](primals_2,
            primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf0, primals_1


class T5LayerNormNew(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        Construct a layernorm module in the T5 style.
        No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
Elvisambition/bert_seq2seq
T5LayerNorm
false
5,167
[ "Apache-2.0" ]
1
643ac537c16872f0d13200de06001d8201a54fbb
https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb
import torch
import torch.nn as nn
import torch.utils.checkpoint


class Model(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        Construct a layernorm module in the T5 style.
        No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        if self.weight.dtype == torch.float16:
            hidden_states = hidden_states  # no-op branch, kept verbatim from the source repo
        return self.weight * hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
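This module is plain RMSNorm: the kernel squares and averages the last axis of size 4 inline (the tmp13 = 4.0 divisor), applies rsqrt with eps = 1e-06, and scales by the weight. A CPU sketch reproducing the module's math (assumes the T5LayerNorm class above is in scope):

import torch

torch.manual_seed(0)
ln = T5LayerNorm(4)
x = torch.rand(4, 4, 4, 4)
variance = x.pow(2).mean(-1, keepdim=True)
manual = ln.weight * (x * torch.rsqrt(variance + ln.variance_epsilon))
print(torch.allclose(ln(x), manual))  # expected: True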
Scale
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/cv/ccvh5pcjc57oskvep5v2okylauufvnesitt2j6dbyvr6fh3vwuer.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 
4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class Scale(nn.Module):

    def __init__(self, scale):
        super().__init__()
        self.scale = scale

    def forward(self, x):
        return x * self.scale


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale': 1.0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ScaleNew(nn.Module):

    def __init__(self, scale):
        super().__init__()
        self.scale = scale

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
FranardoHuang/ROAR
Scale
false
5,168
[ "Apache-2.0" ]
1
859e22389907dd0e61c83980ae5ff6dae51341d3
https://github.com/FranardoHuang/ROAR/tree/859e22389907dd0e61c83980ae5ff6dae51341d3
import torch
from torch import nn


class Model(nn.Module):

    def __init__(self, scale):
        super().__init__()
        self.scale = scale

    def forward(self, x):
        return x * self.scale


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [1.0]
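Note that even for scale = 1.0 Inductor emits a full pointwise kernel with the constant baked in (tmp1 = 1.0 in triton_poi_fused_mul_0), so a different scale value in this generated code would imply a recompile rather than a new kernel argument. The eager behavior itself is trivial to verify (a sketch; assumes the Scale class above is in scope):

import torch

x = torch.rand(4, 4, 4, 4)
assert torch.equal(Scale(1.0)(x), x)      # multiply by the baked-in constant
assert torch.equal(Scale(2.0)(x), x * 2)  # a different constant, same values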
GlobalAttentionGeneral
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qi/cqinh332474qtv7bgen4bcfz2yfclns66jnudr7z7wmvlrgqoduc.py # Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] # Source node to ATen node mapping: # targetT => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {}) triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = 
tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask) tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + (4*x1) + (64*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pm/cpmy57yidxxfl6wmlh5dsizlsat4uz6k43rz6t4r6h2u4z625i5l.py # Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # attn_4 => clone_1 # Graph fragment: # %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr0 + ((4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) # 
Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] stream0 = get_raw_stream(0) triton_poi_fused_clone_transpose_0.run(primals_1, buf1, buf6, 16, 16, grid=grid(16, 16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [targetT, attn], Original ATen: [aten.clone, aten.bmm] extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 16, grid=grid(16, 16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [weightedContext], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel


def conv1x1(in_planes, out_planes, bias=False):
    """1x1 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
        padding=0, bias=bias)


class GlobalAttentionGeneral(nn.Module):

    def __init__(self, idf, cdf):
        super(GlobalAttentionGeneral, self).__init__()
        self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)
        self.mask = None

    def applyMask(self, mask):
        self.mask = mask

    def forward(self, input, context):
        """
        input: batch x idf x ih x iw (queryL=ihxiw)
        context: batch x cdf x sourceL
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context.unsqueeze(3)
        sourceT = self.conv_context(sourceT).squeeze(3)
        attn = torch.bmm(targetT, sourceT)
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            mask = self.mask.repeat(queryL, 1)
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)
        attn = attn.view(batch_size, queryL, sourceL)
        attn = torch.transpose(attn, 1, 2).contiguous()
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'idf': 4, 'cdf': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask) tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6 def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneralNew(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneralNew, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input_0, input_1): primals_3 = self.conv_context.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
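# One caveat visible in the compiled module above: GlobalAttentionGeneralNew's
# forward always routes through call(), which was traced without the masking
# branch, so applyMask() has no effect on the compiled path. A minimal CUDA
# sketch (illustrative; assumes both classes above are in scope and a CUDA
# device is available) checking that the unmasked outputs agree:
import torch

eager = GlobalAttentionGeneral(idf=4, cdf=4).cuda()
compiled = GlobalAttentionGeneralNew(idf=4, cdf=4).cuda()
compiled.conv_context.load_state_dict(eager.conv_context.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
ctx = torch.rand(4, 4, 4, device='cuda')
out_eager, _ = eager(x, ctx)
out_compiled, _ = compiled(x, ctx)
print(torch.allclose(out_eager, out_compiled, atol=1e-5))  # expect True when no mask is set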
FiroshV/TTI
GlobalAttentionGeneral
false
5,169
[ "MIT" ]
1
4d5a40b0ec69a47faf5256caa6d731e95d1f7b9a
https://github.com/FiroshV/TTI/tree/4d5a40b0ec69a47faf5256caa6d731e95d1f7b9a
import torch
import torch.nn as nn
import torch.nn.parallel


def conv1x1(in_planes, out_planes, bias=False):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
        padding=0, bias=bias)


class Model(nn.Module):

    def __init__(self, idf, cdf):
        super().__init__()
        self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)
        self.mask = None

    def applyMask(self, mask):
        self.mask = mask

    def forward(self, input, context):
        """
            input: batch x idf x ih x iw (queryL=ihxiw)
            context: batch x cdf x sourceL
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context.unsqueeze(3)
        sourceT = self.conv_context(sourceT).squeeze(3)
        attn = torch.bmm(targetT, sourceT)
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            mask = self.mask.repeat(queryL, 1)
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)
        attn = attn.view(batch_size, queryL, sourceL)
        attn = torch.transpose(attn, 1, 2).contiguous()
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4, 4]
ArcMarginProduct_subcenter
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 
= tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2s/c2s7vvaz25xjuqqa7ymam5rco6ey7ycspmykfxm67txox57kysz2.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/o7/co7bzw6oskv7grmzjusyhgf3movzeocopdg3tnslwlwhxsiewsyw.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem, max_1 # Graph fragment: # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 2), kwargs = {}) # 
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_2 = async_compile.triton('triton_poi_fused_max_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (3*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (3*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 > tmp1 tmp6 = tmp0 == tmp1 tmp7 = tmp0 != tmp0 tmp8 = tmp1 != tmp1 tmp9 = tmp7 > tmp8 tmp10 = tmp5 | tmp9 tmp11 = tmp7 & tmp8 tmp12 = tmp6 | tmp11 tmp13 = tl.full([1], 0, tl.int64) tmp14 = tl.full([1], 1, tl.int64) tmp15 = tmp13 < tmp14 tmp16 = tmp12 & tmp15 tmp17 = tmp10 | tmp16 tmp18 = tl.where(tmp17, tmp0, tmp1) tmp19 = tl.where(tmp17, tmp13, tmp14) tmp20 = tmp18 > tmp3 tmp21 = tmp18 == tmp3 tmp22 = tmp18 != tmp18 tmp23 = tmp3 != tmp3 tmp24 = tmp22 > tmp23 tmp25 = tmp20 | tmp24 tmp26 = tmp22 & tmp23 tmp27 = tmp21 | tmp26 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp19 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tmp25 | tmp30 tmp32 = tl.where(tmp31, tmp18, tmp3) tmp33 = tl.where(tmp31, tmp19, tmp28) tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp33, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 48, grid=grid(48), stream=stream0) buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine_all], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 12), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.int64) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] triton_poi_fused_max_2.run(buf2, buf3, buf4, 256, grid=grid(256), stream=stream0) del buf2 return (buf3, primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn.functional as F
import torch.nn as nn


class ArcMarginProduct_subcenter(nn.Module):

    def __init__(self, in_features, out_features, k=3):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features * k,
            in_features))
        self.reset_parameters()
        self.k = k
        self.out_features = out_features

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, features):
        cosine_all = F.linear(F.normalize(features), F.normalize(self.weight))
        cosine_all = cosine_all.view(-1, self.out_features, self.k)
        cosine, _ = torch.max(cosine_all, dim=2)
        return cosine


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
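# A small usage sketch for ArcMarginProduct_subcenter (illustrative; assumes
# the class defined above is in scope). With out_features=4 and k=3 the weight
# holds 4 * 3 = 12 sub-center directions; F.linear yields 12 cosines per
# sample, and the view + max keeps the best sub-center for each class. For 2D
# embeddings both operands are unit-normalized, so the outputs are true cosine
# similarities bounded by 1.
import torch

head = ArcMarginProduct_subcenter(in_features=4, out_features=4, k=3)
embeddings = torch.rand(8, 4)      # batch of 2D feature vectors
logits = head(embeddings)
assert logits.shape == (8, 4)      # one max-over-subcenters cosine per class
assert logits.max().item() <= 1.0 + 1e-5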
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 > tmp1 tmp6 = tmp0 == tmp1 tmp7 = tmp0 != tmp0 tmp8 = tmp1 != tmp1 tmp9 = tmp7 > tmp8 tmp10 = tmp5 | tmp9 tmp11 = tmp7 & tmp8 tmp12 = tmp6 | tmp11 tmp13 = tl.full([1], 0, tl.int64) tmp14 = tl.full([1], 1, tl.int64) tmp15 = tmp13 < tmp14 tmp16 = tmp12 & tmp15 tmp17 = tmp10 | tmp16 tmp18 = tl.where(tmp17, tmp0, tmp1) tmp19 = tl.where(tmp17, tmp13, tmp14) tmp20 = tmp18 > tmp3 tmp21 = tmp18 == tmp3 tmp22 = tmp18 != tmp18 tmp23 = tmp3 != tmp3 tmp24 = tmp22 > tmp23 tmp25 = tmp20 | tmp24 tmp26 = tmp22 & tmp23 tmp27 = tmp21 | tmp26 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp19 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tmp25 | tmp30 tl.where(tmp31, tmp18, tmp3) tmp33 = tl.where(tmp31, tmp19, tmp28) tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp33, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(48)](primals_2, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 12), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.int64) triton_poi_fused_max_2[grid(256)](buf2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 1), 0) class ArcMarginProduct_subcenterNew(nn.Module): def __init__(self, in_features, out_features, k=3): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features * k, in_features)) self.reset_parameters() self.k = k self.out_features = out_features def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Fkaneko/kaggle-hpa-single-cell-image-classification
ArcMarginProduct_subcenter
false
5,170
[ "MIT" ]
1
52000cbf5c7eec6ace29274d9e85b5b24fac281b
https://github.com/Fkaneko/kaggle-hpa-single-cell-image-classification/tree/52000cbf5c7eec6ace29274d9e85b5b24fac281b
import math
import torch
import torch.nn.functional as F
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, in_features, out_features, k=3):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features * k,
            in_features))
        self.reset_parameters()
        self.k = k
        self.out_features = out_features

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, features):
        cosine_all = F.linear(F.normalize(features), F.normalize(self.weight))
        cosine_all = cosine_all.view(-1, self.out_features, self.k)
        cosine, _ = torch.max(cosine_all, dim=2)
        return cosine


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
DownConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/it/citiw25o2bzpowghghyesgdfdm2lebsqwdmo6qg6foemy373lm2q.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 64, grid=grid(64), stream=stream0) return (buf4, buf3, primals_1, primals_3, primals_4, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True,
        groups=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
        padding=padding, bias=bias, groups=groups)


class DownConv(nn.Module):
    """
    A helper Module that performs 2 convolutions and 1 MaxPool.
    A ReLU activation follows each convolution.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super(DownConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        before_pool = x
        if self.pooling:
            x = self.pool(x)
        return x, before_pool


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
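# A small usage sketch for DownConv (illustrative; assumes the class defined
# above is in scope). The block returns both the pooled tensor and the
# pre-pool activation, which serves as the skip connection in a U-Net-style
# encoder:
import torch

block = DownConv(in_channels=4, out_channels=8, pooling=True)
x = torch.rand(2, 4, 32, 32)
pooled, before_pool = block(x)
assert before_pool.shape == (2, 8, 32, 32)  # full resolution, kept for the skip path
assert pooled.shape == (2, 8, 16, 16)       # MaxPool2d(kernel_size=2, stride=2) halves H and W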
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(64)](buf3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf4, buf3, primals_1, primals_3, primals_4, buf1, buf3, buf5 def conv3x3(in_channels, 
out_channels, stride=1, padding=1, bias=True, groups=1 ): return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias, groups=groups) class DownConvNew(nn.Module): """ A helper Module that performs 2 convolutions and 1 MaxPool. A ReLU activation follows each convolution. """ def __init__(self, in_channels, out_channels, pooling=True): super(DownConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.pooling = pooling self.conv1 = conv3x3(self.in_channels, self.out_channels) self.conv2 = conv3x3(self.out_channels, self.out_channels) if self.pooling: self.pool = nn.MaxPool2d(kernel_size=2, stride=2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
ForrestPi/SegDL
DownConv
false
5,171
[ "MIT" ]
1
56f2ff229dfa7540704d6de50292c724693aac75
https://github.com/ForrestPi/SegDL/tree/56f2ff229dfa7540704d6de50292c724693aac75
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True,
        groups=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
        padding=padding, bias=bias, groups=groups)


class Model(nn.Module):
    """
    A helper Module that performs 2 convolutions and 1 MaxPool.
    A ReLU activation follows each convolution.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        before_pool = x
        if self.pooling:
            x = self.pool(x)
        return x, before_pool


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
RefineLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xm/cxm7ych4a3r3bcjodg6asqprbzzj4cw6ffxffmk7ejqwx5ah2ldp.py # Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, gm, truediv, mul_2, mul_3, add_2, add_3, pm, truediv_2, mul_4, truediv_1, truediv_3, mul_5, add_4, abs_1, sub, Lcos, mul_8, mul_7, sub_1, Lmag, mul_9, add_5], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.div, aten.abs, aten.rsub, aten.sub, aten.clamp_min] # Source node to ATen node mapping: # Lcos => mul_6 # Lmag => clamp_min # abs_1 => abs_1 # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # gm => sqrt # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_7 => mul_7 # mul_8 => mul_8 # mul_9 => mul_9 # pm => sqrt_1 # sub => sub # sub_1 => sub_1 # truediv => div # truediv_1 => div_1 # truediv_2 => div_2 # truediv_3 => div_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %convolution), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %convolution_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-06), kwargs = {}) # %sqrt : [num_users=3] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution, %sqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %convolution_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, %convolution_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {}) # %sqrt_1 : [num_users=4] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {}) # %div_2 : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_2, %sqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_1, %sqrt), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_3, %sqrt_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%add_4,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %abs_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sqrt_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, 0.5), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, 1.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_7, %sqrt_1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %mul_9), kwargs = {}) triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0 = async_compile.triton('triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, 
tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp2 = tl.load(in_ptr0 + (x0), None) tmp9 = tl.load(in_ptr1 + (x0), None) tmp11 = tl.load(in_ptr2 + (x0), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp5 = 1e-06 tmp6 = tmp4 + tmp5 tmp7 = libdevice.sqrt(tmp6) tmp8 = tmp0 / tmp7 tmp10 = tmp9 * tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp13 + tmp5 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp9 / tmp15 tmp17 = tmp8 * tmp16 tmp18 = tmp2 / tmp7 tmp19 = tmp11 / tmp15 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl_math.abs(tmp21) tmp23 = 1.0 tmp24 = tmp23 - tmp22 tmp25 = tmp24 * tmp15 tmp26 = 0.5 tmp27 = tmp25 * tmp26 tmp28 = 1.5 tmp29 = tmp7 * tmp28 tmp30 = tmp29 - tmp15 tmp31 = 0.0 tmp32 = triton_helpers.maximum(tmp30, tmp31) tmp33 = tmp32 * tmp26 tmp34 = tmp27 + tmp33 tl.store(in_out_ptr0 + (x0), tmp34, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rt/crtesf3tgdclv4qr5rutmf2tfa2nho6q4ns6plnvp4zyafxujw65.py # Topologically Sorted Source Nodes: [Lrefine, Lrefine_1], Original ATen: [aten.mul, aten.mean] # Source node to ATen node mapping: # Lrefine => mul_10 # Lrefine_1 => mean # Graph fragment: # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %arg4_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_10,), kwargs = {}) triton_red_fused_mean_mul_1 = async_compile.triton('triton_red_fused_mean_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[8, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mean_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 8 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + ((4096*((r1 + (8192*x0)) // 16384)) + (r1 % 4096)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 
(8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = _tmp4 + tmp3 _tmp4 = tl.where(rmask & xmask, tmp5, _tmp4) tmp4 = tl.sum(_tmp4, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sl/csl7zqsuuht4isdmcmdndxucluxnhhq774wue4va23y6v7bc3z2b.py # Topologically Sorted Source Nodes: [Lrefine, Lrefine_1], Original ATen: [aten.mul, aten.mean] # Source node to ATen node mapping: # Lrefine => mul_10 # Lrefine_1 => mean # Graph fragment: # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %arg4_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_10,), kwargs = {}) triton_per_fused_mean_mul_2 = async_compile.triton('triton_per_fused_mean_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 65536.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(arg3_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(arg4_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [gx], Original ATen: 
[aten.convolution] buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) # Topologically Sorted Source Nodes: [gy], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(arg1_1, arg2_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1)) del arg1_1 # Topologically Sorted Source Nodes: [px], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(arg3_1, arg0_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) del arg0_1 # Topologically Sorted Source Nodes: [py], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(arg3_1, arg2_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1)) del arg2_1 del arg3_1 buf4 = reinterpret_tensor(buf0, (4, 1, 64, 64), (4096, 16384, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, gm, truediv, mul_2, mul_3, add_2, add_3, pm, truediv_2, mul_4, truediv_1, truediv_3, mul_5, add_4, abs_1, sub, Lcos, mul_8, mul_7, sub_1, Lmag, mul_9, add_5], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.div, aten.abs, aten.rsub, aten.sub, aten.clamp_min] stream0 = get_raw_stream(0) triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0.run(buf4, buf1, buf2, buf3, 16384, grid=grid(16384), stream=stream0) del buf1 del buf2 del buf3 buf5 = empty_strided_cuda((8, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [Lrefine, Lrefine_1], Original ATen: [aten.mul, aten.mean] triton_red_fused_mean_mul_1.run(buf4, arg4_1, buf5, 8, 8192, grid=grid(8), stream=stream0) del arg4_1 del buf4 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [Lrefine, Lrefine_1], Original ATen: [aten.mul, aten.mean] triton_per_fused_mean_mul_2.run(buf7, buf5, 1, 8, grid=grid(1), stream=stream0) del buf5 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) arg4_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn


class RefineLoss(nn.Module):

    def __init__(self, alpha=1.5, alpha1=0.5, reduction='mean'):
        super(RefineLoss, self).__init__()
        self.alpha = alpha
        self.alpha1 = alpha1
        self.reduction = reduction
        self.fx = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        self.fy = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        ngx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
        ngy = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=np.float32)
        self.fx.weight.data.copy_(torch.from_numpy(ngx))
        self.fy.weight.data.copy_(torch.from_numpy(ngy))
        for param in self.fx.parameters():
            param.requires_grad = False
        for param in self.fy.parameters():
            param.requires_grad = False

    def forward(self, grayimg, pred, mask):
        """
        grayimg: grayscale input image
        pred: predicted mask
        mask: boundary mask; can be generated from the ground-truth
            foreground mask by a morphological transformation
        """
        gx = self.fx(grayimg)
        gy = self.fy(grayimg)
        px = self.fx(pred)
        py = self.fy(pred)
        gm = torch.sqrt(gx * gx + gy * gy + 1e-06)
        pm = torch.sqrt(px * px + py * py + 1e-06)
        gv = gx / gm, gy / gm
        pv = px / pm, py / pm
        Lcos = (1 - torch.abs(gv[0] * pv[0] + gv[1] * pv[1])) * pm
        Lmag = torch.clamp_min(self.alpha * gm - pm, 0)
        Lrefine = (self.alpha1 * Lcos + (1 - self.alpha1) * Lmag) * mask
        if self.reduction == 'mean':
            Lrefine = Lrefine.mean()
        elif self.reduction == 'sum':
            Lrefine = Lrefine.sum()
        return Lrefine


def get_inputs():
    return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64]),
        torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp2 = tl.load(in_ptr0 + x0, None)
    tmp9 = tl.load(in_ptr1 + x0, None)
    tmp11 = tl.load(in_ptr2 + x0, None)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp5 = 1e-06
    tmp6 = tmp4 + tmp5
    tmp7 = libdevice.sqrt(tmp6)
    tmp8 = tmp0 / tmp7
    tmp10 = tmp9 * tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp13 + tmp5
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = tmp9 / tmp15
    tmp17 = tmp8 * tmp16
    tmp18 = tmp2 / tmp7
    tmp19 = tmp11 / tmp15
    tmp20 = tmp18 * tmp19
    tmp21 = tmp17 + tmp20
    tmp22 = tl_math.abs(tmp21)
    tmp23 = 1.0
    tmp24 = tmp23 - tmp22
    tmp25 = tmp24 * tmp15
    tmp26 = 0.5
    tmp27 = tmp25 * tmp26
    tmp28 = 1.5
    tmp29 = tmp7 * tmp28
    tmp30 = tmp29 - tmp15
    tmp31 = 0.0
    tmp32 = triton_helpers.maximum(tmp30, tmp31)
    tmp33 = tmp32 * tmp26
    tmp34 = tmp27 + tmp33
    tl.store(in_out_ptr0 + x0, tmp34, None)


@triton.jit
def triton_red_fused_mean_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 8
    rnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    _tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (4096 * ((r1 + 8192 * x0) // 16384) + r1 %
            4096), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp1 = tl.load(in_ptr1 + (r1 + 8192 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp2 = tmp0 * tmp1
        tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
        tmp5 = _tmp4 + tmp3
        _tmp4 = tl.where(rmask & xmask, tmp5, _tmp4)
    tmp4 = tl.sum(_tmp4, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_per_fused_mean_mul_2(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 65536.0
    tmp5 = tmp3 / tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)


def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
    args.clear()
    assert_size_stride(arg0_1, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(arg3_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(arg4_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf1 = extern_kernels.convolution(arg1_1, arg2_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
        del arg1_1
        buf2 = extern_kernels.convolution(arg3_1, arg0_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1))
        del arg0_1
        buf3 = extern_kernels.convolution(arg3_1, arg2_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1))
        del arg2_1
        del arg3_1
        buf4 = reinterpret_tensor(buf0, (4, 1, 64, 64), (4096, 16384, 64, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_abs_add_clamp_min_div_mul_rsub_sqrt_sub_0[grid(16384)
            ](buf4, buf1, buf2, buf3, 16384, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf1
        del buf2
        del buf3
        buf5 = empty_strided_cuda((8,), (1,), torch.float32)
        triton_red_fused_mean_mul_1[grid(8)](buf4, arg4_1, buf5, 8, 8192,
            XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
        del arg4_1
        del buf4
        buf6 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf6
        del buf6
        triton_per_fused_mean_mul_2[grid(1)](buf7, buf5, 1, 8, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf5
    return buf7,


class RefineLossNew(nn.Module):

    def __init__(self, alpha=1.5, alpha1=0.5, reduction='mean'):
        super(RefineLossNew, self).__init__()
        self.alpha = alpha
        self.alpha1 = alpha1
        self.reduction = reduction
        self.fx = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        self.fy = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        ngx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
        ngy = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=np.float32)
        self.fx.weight.data.copy_(torch.from_numpy(ngx))
        self.fy.weight.data.copy_(torch.from_numpy(ngy))
        for param in self.fx.parameters():
            param.requires_grad = False
        for param in self.fy.parameters():
            param.requires_grad = False

    def forward(self, input_0, input_1, input_2):
        arg0_1 = self.fx.weight
        arg2_1 = self.fy.weight
        arg1_1 = input_0
        arg3_1 = input_1
        arg4_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
        return output[0]
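A quick parity sketch (not part of the original record; assumes a CUDA device and that the RefineLoss and RefineLossNew classes above are in scope): the fused call graph should reproduce the eager loss, since both modules build identical fixed Sobel filters.

import torch

torch.manual_seed(0)
grayimg = torch.rand(4, 1, 64, 64, device='cuda')
pred = torch.rand(4, 1, 64, 64, device='cuda')
mask = torch.rand(4, 4, 64, 64, device='cuda')
eager = RefineLoss().cuda()
fused = RefineLossNew().cuda()
# No state copy is needed: both constructors write the same Sobel kernels.
print(torch.allclose(eager(grayimg, pred, mask),
                     fused(grayimg, pred, mask), atol=1e-05))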
ForrestPi/SegDL
RefineLoss
false
5172
[ "MIT" ]
1
56f2ff229dfa7540704d6de50292c724693aac75
https://github.com/ForrestPi/SegDL/tree/56f2ff229dfa7540704d6de50292c724693aac75
import torch
import numpy as np
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, alpha=1.5, alpha1=0.5, reduction='mean'):
        super().__init__()
        self.alpha = alpha
        self.alpha1 = alpha1
        self.reduction = reduction
        self.fx = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        self.fy = nn.Conv2d(1, 1, 3, padding=1, bias=False)
        ngx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
        ngy = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=np.float32)
        self.fx.weight.data.copy_(torch.from_numpy(ngx))
        self.fy.weight.data.copy_(torch.from_numpy(ngy))
        for param in self.fx.parameters():
            param.requires_grad = False
        for param in self.fy.parameters():
            param.requires_grad = False

    def forward(self, grayimg, pred, mask):
        """
        grayimg: grayscale input image
        pred: predicted mask
        mask: boundary mask; can be generated from the ground-truth
            foreground mask by a morphological transformation
        """
        gx = self.fx(grayimg)
        gy = self.fy(grayimg)
        px = self.fx(pred)
        py = self.fy(pred)
        gm = torch.sqrt(gx * gx + gy * gy + 1e-06)
        pm = torch.sqrt(px * px + py * py + 1e-06)
        gv = gx / gm, gy / gm
        pv = px / pm, py / pm
        Lcos = (1 - torch.abs(gv[0] * pv[0] + gv[1] * pv[1])) * pm
        Lmag = torch.clamp_min(self.alpha * gm - pm, 0)
        Lrefine = (self.alpha1 * Lcos + (1 - self.alpha1) * Lmag) * mask
        if self.reduction == 'mean':
            Lrefine = Lrefine.mean()
        elif self.reduction == 'sum':
            Lrefine = Lrefine.sum()
        return Lrefine


def get_inputs():
    return [torch.rand([4, 1, 64, 64]), torch.rand([4, 1, 64, 64]),
        torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return []
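The docstring above only hints at how the boundary mask is produced; the sketch below is one plausible recipe (an illustrative assumption, not code from this repository), using max pooling as a cheap morphological dilation/erosion.

import torch
import torch.nn.functional as F

def boundary_mask(fg, k=3):
    # fg: binary foreground mask of shape (N, 1, H, W) with values in {0, 1}
    dilated = F.max_pool2d(fg, k, stride=1, padding=k // 2)
    eroded = -F.max_pool2d(-fg, k, stride=1, padding=k // 2)
    return dilated - eroded  # 1 on a thin band around the object boundary

fg = (torch.rand(4, 1, 64, 64) > 0.5).float()
mask = boundary_mask(fg)  # usable as the `mask` argument of the loss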
Downsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jw/cjw2limywkgcxcmakwde6yhejpgbx3f2uhb57pqab2yabyayyfay.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 1, 0, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 5) % 5 x0 = xindex % 5 x2 = (xindex // 25) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 
tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 400, grid=grid(400), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: 
[aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.hub


class Downsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = 0, 1, 0, 1
            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.hub

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 5 % 5
    x0 = xindex % 5
    x2 = xindex // 25
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 4, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = x0
    tmp4 = tmp3 < tmp1
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
    tl.store(out_ptr0 + x3, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class DownsampleNew(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=2, padding=0)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Frikallo/YAKbot
Downsample
false
5173
[ "MIT" ]
1
bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
https://github.com/Frikallo/YAKbot/tree/bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
import torch
import torch.nn as nn
import torch.hub


class Model(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = 0, 1, 0, 1
            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
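A shape check for the Model above (hypothetical usage, passing with_conv as a proper bool): the asymmetric (0, 1, 0, 1) pad followed by a 3x3, stride-2, padding-0 convolution halves even spatial sizes.

import torch

down = Model(in_channels=4, with_conv=True)
x = torch.rand(4, 4, 4, 4)
# (4, 4, 4, 4) -> pad -> (4, 4, 5, 5) -> conv(k=3, s=2, p=0) -> (4, 4, 2, 2)
print(down(x).shape)  # torch.Size([4, 4, 2, 2])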
Upsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mt/cmt4roffhwfg6vw2odjfrgu4bjav3cztqx74kxjfq5igljucibfl.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 
8, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.hub


class Upsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0,
            mode='nearest')
        if self.with_conv:
            x = self.conv(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.hub

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class UpsampleNew(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=1, padding=1)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Frikallo/YAKbot
Upsample
false
5174
[ "MIT" ]
1
bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
https://github.com/Frikallo/YAKbot/tree/bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc
import torch
import torch.nn as nn
import torch.hub


class Model(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0,
            mode='nearest')
        if self.with_conv:
            x = self.conv(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
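A matching shape check for this Model (again a hypothetical usage with a bool flag): nearest-neighbor 2x interpolation followed by a 3x3, stride-1, padding-1 convolution doubles H and W and keeps the channel count.

import torch

up = Model(in_channels=4, with_conv=True)
x = torch.rand(4, 4, 4, 4)
# (4, 4, 4, 4) -> interpolate -> (4, 4, 8, 8) -> conv -> (4, 4, 8, 8)
print(up(x).shape)  # torch.Size([4, 4, 8, 8])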
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2x/c2x66b6sza3svon43c774fqn45xzpdlajk7fj3gf6dzmp6nxl7jx.py # Topologically Sorted Source Nodes: [add, combined_states], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # combined_states => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex 
% 256 x0 = xindex % 4 x3 = (xindex // 256) x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + (x6), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted 
Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/df/cdf4yunxzwhg2apeu35u7judmlotrbwvwododu4qvc6xrng7w2yb.py # Topologically Sorted Source Nodes: [attention_weights, attention_weights_1], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # attention_weights => mul # attention_weights_1 => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x1 = (xindex // 4) % 16 x3 = (xindex // 256) x5 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (16 + x1 + (64*x3)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (32 + x1 + (64*x3)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (48 + x1 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp0 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp0 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp0 * tmp9 tmp11 = tmp8 + tmp10 tl.store(out_ptr0 + (x5), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, combined_states], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf0, primals_2, buf1, primals_5, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 del primals_5 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source 
Nodes: [attention_scores], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights, attention_weights_1], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_3, buf6, buf7, 1024, grid=grid(1024), stream=stream0) del buf6 return (buf7, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf4, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch as th
from torch import nn
import torch.nn.functional as F


class Attention(nn.Module):

    def __init__(self, encoder_dim, decoder_dim, attention_dim):
        super(Attention, self).__init__()
        self.attention_dim = attention_dim
        self.W = nn.Linear(decoder_dim, attention_dim)
        self.U = nn.Linear(encoder_dim, attention_dim)
        self.A = nn.Linear(attention_dim, 1)

    def forward(self, features, hidden_state):
        u_hs = self.U(features)
        w_ah = self.W(hidden_state)
        combined_states = th.tanh(u_hs + w_ah.unsqueeze(1))
        attention_scores = self.A(combined_states)
        attention_scores = attention_scores.squeeze(2)
        alpha = F.softmax(attention_scores, dim=1)
        attention_weights = features * alpha.unsqueeze(2)
        attention_weights = attention_weights.sum(dim=1)
        return attention_weights


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x0 = xindex % 4
    x3 = xindex // 256
    x5 = xindex % 64
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = libdevice.tanh(tmp6)
    tl.store(out_ptr0 + x6, tmp7, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex % 256
    x1 = xindex // 4 % 16
    x3 = xindex // 256
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x1 + 64 * x3), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (16 + x1 + 64 * x3), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (32 + x1 + 64 * x3), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (48 + x1 + 64 * x3), xmask,
        eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp0 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp0 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp0 * tmp9
    tmp11 = tmp8 + tmp10
    tl.store(out_ptr0 + x5, tmp11, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1,
            primals_5, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        del primals_5
        buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
        del buf1
        extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
            (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_8
        buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0)
        del buf0
        triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
            torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_mul_sum_3[grid(1024)](primals_3, buf6, buf7, 1024,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf6
    return buf7, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
        ), buf2, buf4, primals_7


class AttentionNew(nn.Module):

    def __init__(self, encoder_dim, decoder_dim, attention_dim):
        super(AttentionNew, self).__init__()
        self.attention_dim = attention_dim
        self.W = nn.Linear(decoder_dim, attention_dim)
        self.U = nn.Linear(encoder_dim, attention_dim)
        self.A = nn.Linear(attention_dim, 1)

    def forward(self, input_0, input_1):
        primals_1 = self.W.weight
        primals_2 = self.W.bias
        primals_4 = self.U.weight
        primals_5 = self.U.bias
        primals_7 = self.A.weight
        primals_8 = self.A.bias
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
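A hedged parity sketch (assumes a CUDA device and that Attention and AttentionNew above are in scope): with identical weights, the fused kernels should reproduce the eager module on the shapes used by get_inputs.

import torch

torch.manual_seed(0)
eager = Attention(4, 4, 4).cuda()
fused = AttentionNew(4, 4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # same submodule names: W, U, A
f = torch.rand(4, 4, 4, 4, device='cuda')
h = torch.rand(4, 4, 4, 4, device='cuda')
# The tolerance is an assumption; fp32 fused kernels usually match to ~1e-5.
print(torch.allclose(eager(f, h), fused(f, h), atol=1e-05))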
FranardoHuang/ROAR
Attention
false
5175
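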
[ "Apache-2.0" ]
1
859e22389907dd0e61c83980ae5ff6dae51341d3
https://github.com/FranardoHuang/ROAR/tree/859e22389907dd0e61c83980ae5ff6dae51341d3
import torch import torch as th from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, encoder_dim, decoder_dim, attention_dim): super().__init__() self.attention_dim = attention_dim self.W = nn.Linear(decoder_dim, attention_dim) self.U = nn.Linear(encoder_dim, attention_dim) self.A = nn.Linear(attention_dim, 1) def forward(self, features, hidden_state): u_hs = self.U(features) w_ah = self.W(hidden_state) combined_states = th.tanh(u_hs + w_ah.unsqueeze(1)) attention_scores = self.A(combined_states) attention_scores = attention_scores.squeeze(2) alpha = F.softmax(attention_scores, dim=1) attention_weights = features * alpha.unsqueeze(2) attention_weights = attention_weights.sum(dim=1) return attention_weights def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
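Since the record ships both the eager Model and the compiled AttentionNew together with get_inputs(), a quick parity check is possible. The sketch below is hypothetical: it assumes both class definitions from the listings above are in scope and that a CUDA device is available, because the generated call() pins cuda:0.

import torch

torch.manual_seed(0)
eager = Model(4, 4, 4).cuda()
compiled = AttentionNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical W, U, A weights
features, hidden = (t.cuda() for t in get_inputs())
torch.testing.assert_close(compiled(features, hidden),
                           eager(features, hidden), rtol=1e-4, atol=1e-5)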
DeterministicCriticNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sm/csm4ofalq42npqq7fv6jo3il6ujywmjwqnazwa5z35h4asxel7vx.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 68 x1 = (xindex // 68) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 68, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-64) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mt/cmttmov7q7l6eww5wgel4xbdmlbbf53sgwydh2ovfk4ks65mt3ki.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/it/cit4qjb7wmwrbvv2rtchpn3duppvfiyliqnz2jz3tymwbqqane7m.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = 
(%add_tensor_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (64, 68), (68, 1)) assert_size_stride(primals_6, (64, ), (1, )) assert_size_stride(primals_7, (1, 64), (64, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_3, primals_1, buf1, 272, grid=grid(272), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (68, 64), (1, 68), 0), out=buf2) buf3 = buf2; del buf2 # reuse # 
Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_6, 256, grid=grid(256), stream=stream0) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 64), (64, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf0, primals_3, buf6, 256, grid=grid(256), stream=stream0) del buf0 del primals_3 return (buf5, primals_4, buf1, buf3, primals_7, primals_5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, 68), (68, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class DeterministicCriticNet(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, gpu=False, batch_norm=False, non_linear=F.relu, hidden_size=64): super(DeterministicCriticNet, self).__init__() self.layer1 = nn.Linear(state_dim, hidden_size) self.layer2 = nn.Linear(hidden_size + action_dim, hidden_size) self.layer3 = nn.Linear(hidden_size, 1) self.non_linear = non_linear if batch_norm: self.bn1 = nn.BatchNorm1d(hidden_size) self.bn2 = nn.BatchNorm1d(hidden_size) self.batch_norm = batch_norm BasicNet.__init__(self, None, gpu, False) self.init_weights() def init_weights(self): bound = 0.003 self.layer3.weight.data.uniform_(-bound, bound) self.layer3.bias.data.fill_(0) def fanin(size): v = 1.0 / np.sqrt(size[1]) return torch.FloatTensor(size).uniform_(-v, v) self.layer1.weight.data = fanin(self.layer1.weight.data.size()) self.layer1.bias.data.fill_(0) self.layer2.weight.data = fanin(self.layer2.weight.data.size()) self.layer2.bias.data.fill_(0) def forward(self, x, action): x = self.to_torch_variable(x) action = self.to_torch_variable(action) x = self.non_linear(self.layer1(x)) if self.batch_norm: x = self.bn1(x) x = self.non_linear(self.layer2(torch.cat([x, action], dim=1))) if self.batch_norm: x = self.bn2(x) x = self.layer3(x) return x def predict(self, x, action): return self.forward(x, action) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 68 x1 = xindex // 68 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 68, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-64 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (64, 68), (68, 1)) assert_size_stride(primals_6, (64,), (1,)) assert_size_stride(primals_7, (1, 64), (64, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(272)](buf0, primals_3, primals_1, buf1, 272, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32) 
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (68, 64), (1, 68), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(256)](buf3, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 64), (64, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf0, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 return buf5, primals_4, buf1, buf3, primals_7, primals_5, buf6 class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class DeterministicCriticNetNew(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, gpu=False, batch_norm=False, non_linear=F.relu, hidden_size=64): super(DeterministicCriticNetNew, self).__init__() self.layer1 = nn.Linear(state_dim, hidden_size) self.layer2 = nn.Linear(hidden_size + action_dim, hidden_size) self.layer3 = nn.Linear(hidden_size, 1) self.non_linear = non_linear if batch_norm: self.bn1 = nn.BatchNorm1d(hidden_size) self.bn2 = nn.BatchNorm1d(hidden_size) self.batch_norm = batch_norm BasicNet.__init__(self, None, gpu, False) self.init_weights() def init_weights(self): bound = 0.003 self.layer3.weight.data.uniform_(-bound, bound) self.layer3.bias.data.fill_(0) def fanin(size): v = 1.0 / np.sqrt(size[1]) return torch.FloatTensor(size).uniform_(-v, v) self.layer1.weight.data = fanin(self.layer1.weight.data.size()) self.layer1.bias.data.fill_(0) self.layer2.weight.data = fanin(self.layer2.weight.data.size()) self.layer2.bias.data.fill_(0) def predict(self, x, action): return self.forward(x, action) def forward(self, input_0, input_1): primals_2 = self.layer1.weight primals_3 = self.layer1.bias primals_5 = self.layer2.weight primals_6 = self.layer2.bias primals_7 = self.layer3.weight primals_8 = self.layer3.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
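In both the original and cleaned listings, triton_poi_fused_cat_0 folds three eager ops into one pass over the 4 x 68 output: the bias add of layer1, the ReLU, and the concatenation with the action tensor (the preceding extern_kernels.mm supplies only the bias-free matmul). A hand-written reference of what the kernel computes, with hypothetical names:

import torch
import torch.nn.functional as F

def fused_cat_reference(state, action, w1, b1):
    # Columns 0..63 hold relu(state @ W1^T + b1); columns 64..67 hold action.
    hidden = F.relu(state @ w1.t() + b1)       # (4, 64)
    return torch.cat([hidden, action], dim=1)  # (4, 68)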
G-Flor/deeprl
DeterministicCriticNet
false
5176
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
import torch import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class Model(nn.Module, BasicNet): def __init__(self, state_dim, action_dim, gpu=False, batch_norm=False, non_linear=F.relu, hidden_size=64): super().__init__() self.layer1 = nn.Linear(state_dim, hidden_size) self.layer2 = nn.Linear(hidden_size + action_dim, hidden_size) self.layer3 = nn.Linear(hidden_size, 1) self.non_linear = non_linear if batch_norm: self.bn1 = nn.BatchNorm1d(hidden_size) self.bn2 = nn.BatchNorm1d(hidden_size) self.batch_norm = batch_norm BasicNet.__init__(self, None, gpu, False) self.init_weights() def init_weights(self): bound = 0.003 self.layer3.weight.data.uniform_(-bound, bound) self.layer3.bias.data.fill_(0) def fanin(size): v = 1.0 / np.sqrt(size[1]) return torch.FloatTensor(size).uniform_(-v, v) self.layer1.weight.data = fanin(self.layer1.weight.data.size()) self.layer1.bias.data.fill_(0) self.layer2.weight.data = fanin(self.layer2.weight.data.size()) self.layer2.bias.data.fill_(0) def forward(self, x, action): x = self.to_torch_variable(x) action = self.to_torch_variable(action) x = self.non_linear(self.layer1(x)) if self.batch_norm: x = self.bn1(x) x = self.non_linear(self.layer2(torch.cat([x, action], dim=1))) if self.batch_norm: x = self.bn2(x) x = self.layer3(x) return x def predict(self, x, action): return self.forward(x, action) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4]
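One kernel in this record, triton_poi_fused_relu_threshold_backward_2, contributes nothing to the forward value itself: it materializes the boolean mask relu(x) <= 0 that call() returns for autograd, so ReLU's backward can zero gradients without recomputing the activation. A small illustration of that saved-mask pattern (hand-written, not generated):

import torch

x = torch.randn(4, 64)
y = torch.relu(x)
mask = y <= 0                              # what the fused kernel stores (torch.bool)
grad_out = torch.randn_like(y)
grad_in = grad_out.masked_fill(mask, 0.0)  # ReLU backward via the saved mask
assert torch.equal(grad_in, grad_out * (x > 0))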
MTFullyConnected
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5y/c5ycegys2ar7wz6epy2shzbmz2mkdkn4qmpsdbgrzhhu5ufhemkx.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 8000 x1 = (xindex // 8000) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x0 + (8064*x1)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2d/c2dldo522mnd74n46v2lrkgeutrjrvjind75q4sshb25ha2k2cdg.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4000 x1 = (xindex // 4000) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x0 + (4096*x1)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4o/c4o2m7ozfavldjjmvy54d5j4mjtrlcvp7qonixpmh5h2yro5ycts.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y_2 => relu_2 # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = 
(%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = (xindex // 2000) tmp0 = tl.load(in_out_ptr0 + (x0 + (2016*x1)), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0 + (2016*x1)), tmp4, xmask) tl.store(out_ptr0 + (x0 + (2048*x1)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xp/cxpqywcqam7evubfwwa5zmt733w2zov6otomgqgpramgjdsnjg5k.py # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # y_3 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_7,), kwargs = {}) triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (8000, 4), (4, 1)) assert_size_stride(primals_2, (8000, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4000, 8000), (8000, 1)) assert_size_stride(primals_5, (4000, ), (1, )) assert_size_stride(primals_6, (2000, 4000), (4000, 1)) assert_size_stride(primals_7, (2000, ), (1, )) assert_size_stride(primals_8, (4, 2000), (2000, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0); del buf0 # reuse buf10 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1), torch.bool) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 512000, grid=grid(512000), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0); del buf2 # reuse buf9 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf9, 256000, grid=grid(256000), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0); del buf4 # reuse buf8 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, 
aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf8, 128000, grid=grid(128000), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_3.run(buf7, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8000, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4000, 8000), (8000, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2000, 4000), (4000, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 2000), (2000, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import time import torch import numpy as np from torch import nn from torch import optim from torch.nn import functional as F class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. """ def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) score = torch.cat(score, dim=0).numpy() return score class MTFullyConnected(Base): """Multi-task DNN classification/regression model. It contains four fully connected layers between which are dropout layer for robustness. Arguments: n_dim (int): the No. of columns (features) for input tensor n_task (int): the No. of columns (tasks) for output tensor. 
is_reg (bool, optional): Regression model (True) or Classification model (False) """ def __init__(self, n_dim, n_task, is_reg=False): super(MTFullyConnected, self).__init__() self.n_task = n_task self.dropout = nn.Dropout(0.25) self.fc0 = nn.Linear(n_dim, 8000) self.fc1 = nn.Linear(8000, 4000) self.fc2 = nn.Linear(4000, 2000) self.output = nn.Linear(2000, n_task) self.is_reg = is_reg if is_reg: self.criterion = nn.MSELoss() else: self.criterion = nn.BCELoss() self.activation = nn.Sigmoid() self def forward(self, X, istrain=False): """Invoke the class directly as a function Arguments: X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features. istrain (bool, optional): is it invoked during training process (True) or just for prediction (False) Return: y (FloatTensor): m X l FloatTensor, m is the No. of samples, n is the No. of tasks """ y = F.relu(self.fc0(X)) if istrain: y = self.dropout(y) y = F.relu(self.fc1(y)) if istrain: y = self.dropout(y) y = F.relu(self.fc2(y)) if istrain: y = self.dropout(y) if self.is_reg: y = self.output(y) else: y = self.activation(self.output(y)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_dim': 4, 'n_task': 4}]
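Note that the compiled listing that follows contains no dropout kernels: the graph was traced with istrain=False, so the three self.dropout branches above are dead code and fold away, leaving only the mm, bias+ReLU, and sigmoid chain. The module-mode analogue of that istrain flag is eval(), as in this small sketch (illustrative only):

import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 8), nn.Dropout(0.25), nn.ReLU())
net.eval()  # Dropout becomes the identity, mirroring istrain=False above
x = torch.randn(2, 4)
assert torch.equal(net(x), torch.relu(net[0](x)))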
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import time import numpy as np from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 8000 x1 = xindex // 8000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 8064 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4000 x1 = xindex // 4000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = xindex // 2000 tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask) tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8000, 4), (4, 1)) assert_size_stride(primals_2, (8000,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4000, 8000), (8000, 1)) assert_size_stride(primals_5, (4000,), (1,)) assert_size_stride(primals_6, (2000, 4000), (4000, 1)) assert_size_stride(primals_7, (2000,), (1,)) assert_size_stride(primals_8, (4, 2000), (2000, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1 ), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512000)](buf1, primals_2, buf10, 512000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0) del buf2 buf9 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3, primals_5, buf9, 256000, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(128000)](buf5, primals_7, buf8, 128000, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0 ), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), out=buf6 ) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_sigmoid_3[grid(256)](buf7, primals_9, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0 ), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10 class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. 
""" def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) score = torch.cat(score, dim=0).numpy() return score class MTFullyConnectedNew(Base): """Multi-task DNN classification/regression model. It contains four fully connected layers between which are dropout layer for robustness. Arguments: n_dim (int): the No. of columns (features) for input tensor n_task (int): the No. of columns (tasks) for output tensor. 
is_reg (bool, optional): Regression model (True) or Classification model (False) """ def __init__(self, n_dim, n_task, is_reg=False): super(MTFullyConnectedNew, self).__init__() self.n_task = n_task self.dropout = nn.Dropout(0.25) self.fc0 = nn.Linear(n_dim, 8000) self.fc1 = nn.Linear(8000, 4000) self.fc2 = nn.Linear(4000, 2000) self.output = nn.Linear(2000, n_task) self.is_reg = is_reg if is_reg: self.criterion = nn.MSELoss() else: self.criterion = nn.BCELoss() self.activation = nn.Sigmoid() self def forward(self, input_0): primals_1 = self.fc0.weight primals_2 = self.fc0.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_8 = self.output.weight primals_9 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
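The intermediate buffers in this listing use row strides padded past their logical widths (8064 for 8000 columns, 4096 for 4000, 2016 and 2048 for 2000), which inductor appears to choose so each row starts at an aligned address; the exact padding rule is not visible from the dump, so treat the numbers as opaque. The layout arithmetic itself is plain PyTorch:

import torch

buf = torch.empty_strided((64, 2000), (2016, 1))  # 16 unused floats per row
assert buf.stride(0) == 2016 and buf.shape == (64, 2000)
# element (i, j) lives at flat offset i * 2016 + j; the tail of each row is padding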
EXYNOS-999/DrugEx
MTFullyConnected
false
5177
[ "MIT" ]
1
f75a90fbc0b9863d594fbff6afecb0f866c076d6
https://github.com/EXYNOS-999/DrugEx/tree/f75a90fbc0b9863d594fbff6afecb0f866c076d6
import time import torch import numpy as np from torch import nn from torch import optim from torch.nn import functional as F class Base(nn.Module): """ This class is the base structure for all of classification/regression DNN models. Mainly, it provides the general methods for training, evaluating model and predcting the given data. """ def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001): """Training the DNN model, similar to the scikit-learn or Keras style. In the end, the optimal value of parameters will also be persisted on the hard drive. Arguments: train_loader (DataLoader): Data loader for training set, including m X n target FloatTensor and m X l label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) valid_loader (DataLoader): Data loader for validation set. The data structure is as same as loader_train. out (str): the file path for the model file (suffix with '.pkg') and log file (suffix with '.log'). epochs(int, optional): The maximum of training epochs (default: 100) lr (float, optional): learning rate (default: 1e-4) """ if 'optim' in self.__dict__: optimizer = self.optim else: optimizer = optim.Adam(self.parameters(), lr=lr) best_loss = np.inf last_save = 0 log = open(out + '.log', 'w') for epoch in range(epochs): time.time() for param_group in optimizer.param_groups: param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10) for i, (Xb, yb) in enumerate(train_loader): Xb, yb = Xb, yb optimizer.zero_grad() y_ = self.forward(Xb, istrain=True) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss = self.criterion(y_, yb) loss.backward() optimizer.step() loss_valid = self.evaluate(valid_loader) None if loss_valid < best_loss: torch.save(self.state_dict(), out + '.pkg') None best_loss = loss_valid last_save = epoch else: None if epoch - last_save > 100: break log.close() self.load_state_dict(torch.load(out + '.pkg')) def evaluate(self, loader): """Evaluating the performance of the DNN model. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, including m X n target FloatTensor and l X n label FloatTensor (m is the No. of sample, n is the No. of features, l is the No. of classes or tasks) Return: loss (float): the average loss value based on the calculation of loss function with given test set. """ loss = 0 for Xb, yb in loader: Xb, yb = Xb, yb y_ = self.forward(Xb) ix = yb == yb yb, y_ = yb[ix], y_[ix] loss += self.criterion(y_, yb).data[0] loss = loss / len(loader) return loss def predict(self, loader): """Predicting the probability of each sample in the given dataset. Arguments: loader (torch.utils.data.DataLoader): data loader for test set, only including m X n target FloatTensor (m is the No. of sample, n is the No. of features) Return: score (ndarray): probability of each sample in the given dataset, it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.) """ score = [] for Xb, yb in loader: Xb = Xb y_ = self.forward(Xb) score.append(y_.detach().cpu()) # ... truncated (>4000 chars) for memory efficiency
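Every record in this dump pairs an eager Model with a generated *New wrapper plus get_inputs()/get_init_inputs(), so a generic spot-check harness is possible. The sketch below is hypothetical (names follow the record convention; CUDA is required because each generated call() pins cuda:0), and it handles both init conventions seen above, a flat argument list and an [args, kwargs] pair:

import torch

def spot_check(eager_cls, compiled_cls, get_init_inputs, get_inputs):
    init = get_init_inputs()
    if len(init) == 2 and isinstance(init[0], list) and isinstance(init[1], dict):
        args, kwargs = init          # e.g. [[], {'state_dim': 4, 'action_dim': 4}]
    else:
        args, kwargs = init, {}      # e.g. [4, 4, 4]
    eager = eager_cls(*args, **kwargs).cuda()
    compiled = compiled_cls(*args, **kwargs).cuda()
    compiled.load_state_dict(eager.state_dict())  # share weights exactly
    inputs = [t.cuda() for t in get_inputs()]
    torch.testing.assert_close(compiled(*inputs), eager(*inputs),
                               rtol=1e-4, atol=1e-5)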
CRFLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qa/cqann7hwoinyovy2vyzy4ibqx7mllchfzwjuddbo4dnb622s42xn.py # Topologically Sorted Source Nodes: [point_score], Original ATen: [aten.clone] # Source node to ATen node mapping: # point_score => clone, clone_1 # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 
xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3.to(tl.int64) tmp5 = y0 tmp6 = tmp4 == tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 * tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) tl.store(out_ptr1 + (x2 + (4*y3)), tmp9, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/po/cposh7ysjkoi2sbaoetm5fywv2s4zfpbvppucd2rifanmckpivul.py # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.clone] # Source node to ATen node mapping: # trans_score => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 3 x2 = (xindex // 12) x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (x1 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = x0 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pn/cpn3jvca6vpvbwvtiucyv6pdlmitm3ybr7mlzfovx52qytknospe.py # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.clone] # Source node 
to ATen node mapping: # trans_score => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_11,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x2 = (xindex // 12) x1 = (xindex // 3) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (1 + x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = x1 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hf/chfoirflzf3s6voakzme7yjv5f2ujsqu2xfvfav66c3bxpshjxkh.py # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.clone] # Source node to ATen node mapping: # trans_score => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_12,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 3 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (12*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (3*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yk/cykupt67v623fg4u77yvgjix6qgyq4lga2j7higr2r5byxwna6qa.py # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.mul] # Source node to ATen node mapping: # y_pred => mul # Graph fragment: # %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4w/c4wxkacefkxlkdwtrpknjd4xcwthnn4dnwgp3dpven56qrd5avkn.py # Topologically Sorted Source Nodes: [add_1, max_1, sub, exp, sum_1, outputs_1, mul_2, sub_1, mul_3, outputs_2], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_1 => add_1 # exp => exp # max_1 => max_1 # mul_2 => mul_2 # mul_3 => mul_3 # outputs_1 => add_3 # outputs_2 => add_4 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_5, %unsqueeze_6), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1, True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %select_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %add_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %squeeze_1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5 = async_compile.triton('triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 
4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr3 + (4 + x0 + (16*x1)), xmask) tmp35 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + (x2), tmp37, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rq/crqvyeotxttezrof6dfenza4qbubj2o2imnx7qfvyohfvb4evf5u.py # Topologically Sorted Source Nodes: [add_5, max_2, sub_2, exp_1, sum_2, outputs_4, mul_4, sub_3, mul_5, outputs_5], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_5 => add_5 # exp_1 => exp_1 # max_2 => max_2 # mul_4 => mul_4 # mul_5 => mul_5 # outputs_4 => add_7 # outputs_5 => add_8 # sub_2 => sub_2 # sub_3 => sub_3 # sum_2 => sum_2 # Graph fragment: # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_7, %unsqueeze_6), kwargs = {}) # %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 1, True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_2), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %select_4), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %add_7), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %squeeze_3), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6 = async_compile.triton('triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr3 + (8 + x0 + (16*x1)), xmask) tmp35 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + (x2), tmp37, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pt/cpt5azy6amzl4ztrctbywk7om6atqwp4yv4xozqvndalocdcf5wg.py # Topologically Sorted Source Nodes: [add_9, max_3, sub_4, exp_2, sum_3, outputs_7, mul_6, sub_5, mul_7, outputs_8], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_9 => add_9 # exp_2 => exp_2 # max_3 => max_3 # mul_6 => mul_6 # mul_7 => mul_7 # outputs_7 => add_11 # outputs_8 => add_12 # 
sub_4 => sub_4 # sub_5 => sub_5 # sum_3 => sum_3 # Graph fragment: # %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_9, %unsqueeze_6), kwargs = {}) # %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_9, 1, True), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %getitem_4), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_4, %select_6), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %add_11), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_5), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %squeeze_5), kwargs = {}) # %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7 = async_compile.triton('triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') 
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr3 + (12 + x0 + (16*x1)), xmask) tmp35 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + (x2), tmp37, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bm/cbmmgubf3ag5cjfshwhdajghiutpy7kbqc4egs77exrlpf2bicmm.py # Topologically Sorted Source Nodes: [target_score, sub_7], Original ATen: [aten.add, aten.sub] # Source node to ATen node mapping: # sub_7 => sub_7 # target_score => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_12), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_6, %add), kwargs = {}) triton_poi_fused_add_sub_8 = async_compile.triton('triton_poi_fused_add_sub_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sub_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), 
xmask, eviction_policy='evict_last') tmp20 = tl.load(in_out_ptr0 + (x0), xmask) tmp21 = tl.load(in_ptr1 + (x0), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tmp1 - tmp6 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp3 - tmp6 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp15 = tmp5 - tmp6 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 + tmp18 tmp22 = tmp20 + tmp21 tmp23 = tmp19 - tmp22 tl.store(in_out_ptr0 + (x0), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [point_score], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, primals_2, primals_3, buf0, buf1, 16, 4, grid=grid(16, 4), stream=stream0) buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [point_score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(buf1, (4, 16, 1), (16, 1, 0), 0), out=buf2) del buf0 buf3 = empty_strided_cuda((4, 3, 4, 1), (12, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_3, primals_2, buf3, 48, grid=grid(48), stream=stream0) buf4 = empty_strided_cuda((1, 12, 4), (48, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (1, 12, 4), (0, 4, 1), 0), reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_3, primals_2, buf5, 48, grid=grid(48), stream=stream0) del primals_3 buf6 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, buf6, 16, 3, grid=grid(16, 3), stream=stream0) del buf4 buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [trans_score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 12), (12, 0, 1), 0), reinterpret_tensor(buf6, (4, 12, 1), (12, 1, 0), 0), out=buf7) del buf6 buf8 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.mul] triton_poi_fused_mul_4.run(primals_1, primals_2, buf8, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf11 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [add_1, max_1, sub, exp, sum_1, outputs_1, mul_2, sub_1, mul_3, outputs_2], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.mul, aten.rsub] 
triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5.run(buf11, buf8, primals_4, primals_2, primals_1, 16, grid=grid(16), stream=stream0) buf13 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [add_5, max_2, sub_2, exp_1, sum_2, outputs_4, mul_4, sub_3, mul_5, outputs_5], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.mul, aten.rsub] triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6.run(buf14, buf11, primals_4, primals_2, primals_1, 16, grid=grid(16), stream=stream0) buf16 = reinterpret_tensor(buf11, (4, 1, 4), (4, 16, 1), 0); del buf11 # reuse buf17 = reinterpret_tensor(buf16, (4, 4), (4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [add_9, max_3, sub_4, exp_2, sum_3, outputs_7, mul_6, sub_5, mul_7, outputs_8], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.mul, aten.rsub] triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7.run(buf17, buf14, primals_4, primals_2, primals_1, 16, grid=grid(16), stream=stream0) del buf14 buf18 = reinterpret_tensor(buf2, (4, ), (1, ), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [target_score, sub_7], Original ATen: [aten.add, aten.sub] triton_poi_fused_add_sub_8.run(buf18, buf17, buf7, 4, grid=grid(4), stream=stream0) del buf17 del buf7 return (buf18, primals_1, primals_2, primals_4, reinterpret_tensor(buf8, (4, 4, 1), (16, 1, 1), 0), reinterpret_tensor(buf5, (4, 12, 1), (12, 1, 12), 0), reinterpret_tensor(buf3, (1, 4, 12), (48, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
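As a reading aid for the three fused kernels above (triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5/6/7): each appears to compute one step of the CRF forward recursion, i.e. a max-shifted log-sum-exp over the previous tag plus the mask-scaled emission, gated by the step mask. A rough eager-mode equivalent under that reading (the function name and signature are mine, not part of the dump):

import torch

def fused_step(state, trans, emit_t, mask_t):
    # state: (B, T) previous alphas; trans: (T, T); emit_t: (B, T); mask_t: (B, 1)
    scores = state.unsqueeze(2) + trans.unsqueeze(0)  # tmp2..tmp13: state[i] + trans[i, j]
    lse = torch.logsumexp(scores, dim=1)              # tmp14..tmp28: max-shifted log-sum-exp over i
    out = lse + mask_t * emit_t                       # tmp29..tmp31: add the mask-scaled emission
    return mask_t * out + (1 - mask_t) * state        # tmp32..tmp37: keep the old state where masked out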
import torch import torch.nn.functional as F import torch.nn as nn import torch.utils.checkpoint class CRFLayer(nn.Module): """ """ def __init__(self, output_dim): super(CRFLayer, self).__init__() self.output_dim = output_dim self.trans = nn.Parameter(torch.Tensor(output_dim, output_dim)) self.trans.data.uniform_(-0.1, 0.1) def compute_loss(self, y_pred, y_true, mask): """ Compute the CRF loss. """ y_pred = y_pred * mask y_true = y_true * mask target_score = self.target_score(y_pred, y_true) log_norm = self.log_norm_step(y_pred, mask) log_norm = self.logsumexp(log_norm, dim=1) return log_norm - target_score def forward(self, y_pred, y_true, mask): """ y_true: [[1, 2, 3], [2, 3, 0] ] mask: [[1, 1, 1], [1, 1, 0]] """ if y_pred.shape[0] != mask.shape[0] or y_pred.shape[1] != mask.shape[1 ]: raise Exception('mask shape does not match y_pred shape') mask = mask.reshape((mask.shape[0], mask.shape[1], 1)) mask = mask.float() y_true = y_true.reshape(y_pred.shape[:-1]) y_true = y_true.long() y_true_onehot = F.one_hot(y_true, self.output_dim) y_true_onehot = y_true_onehot.float() return self.compute_loss(y_pred, y_true_onehot, mask) def target_score(self, y_pred, y_true): """ Compute the emission (state-label) score plus the transition score. y_true: (batch, seq_len, out_dim) y_pred: (batch, seq_len, out_dim) """ point_score = torch.einsum('bni,bni->b', y_pred, y_true) trans_score = torch.einsum('bni,ij,bnj->b', y_true[:, :-1], self. trans, y_true[:, 1:]) return point_score + trans_score def log_norm_step(self, y_pred, mask): """ Compute the normalization factor Z(X) (the partition function). """ state = y_pred[:, 0] y_pred = y_pred[:, 1:].contiguous() mask = mask[:, 1:].contiguous() _batch, seq_len, _out_dim = y_pred.shape for t in range(seq_len): cur_mask = mask[:, t] state = torch.unsqueeze(state, 2) g = torch.unsqueeze(self.trans, 0) outputs = self.logsumexp(state + g, dim=1) outputs = outputs + y_pred[:, t] outputs = cur_mask * outputs + (1 - cur_mask) * state.squeeze(-1) state = outputs return outputs def logsumexp(self, x, dim=None, keepdim=False): """ Numerically stable log-sum-exp, to avoid overflow. """ if dim is None: x, dim = x.view(-1), 0 xm, _ = torch.max(x, dim, keepdim=True) out = xm + torch.log(torch.sum(torch.exp(x - xm), dim=dim, keepdim= True)) return out if keepdim else out.squeeze(dim) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 1])] def get_init_inputs(): return [[], {'output_dim': 4}]
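A minimal usage sketch for the class above, following the shapes in get_inputs (all values are illustrative): forward one-hot encodes y_true and returns the per-sequence CRF loss, log Z(x) minus the gold-path score.

import torch

layer = CRFLayer(output_dim=4)
y_pred = torch.rand(4, 4, 4)          # (batch, seq_len, num_tags) emission scores
y_true = torch.randint(0, 4, (4, 4))  # gold tag ids per position
mask = torch.ones(4, 4, 1)            # 1 = observed position
loss = layer(y_pred, y_true, mask)
print(loss.shape)                     # torch.Size([4]), one loss per sequence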
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3.to(tl.int64) tmp5 = y0 tmp6 = tmp4 == tmp5 tmp7 = tmp6.to(tl.int64) tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp9, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 3 x2 = xindex // 12 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr1 + (x1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.int64) tmp2 = x0 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x2 = xindex // 12 x1 = xindex // 3 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (1 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (1 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = x1 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4.to(tl.float32) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 3 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 12 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 3 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + (4 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + (8 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr0 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = 
tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + (12 + x0 + 16 * x1), xmask) tmp35 = tl.load(in_ptr0 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.log(tmp25) tmp28 = tmp14 + tmp27 tmp30 = tmp29 * tmp26 tmp31 = tmp28 + tmp30 tmp32 = tmp26 * tmp31 tmp33 = 1.0 tmp34 = tmp33 - tmp26 tmp36 = tmp34 * tmp35 tmp37 = tmp32 + tmp36 tl.store(in_out_ptr0 + x2, tmp37, xmask) @triton.jit def triton_poi_fused_add_sub_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_out_ptr0 + x0, xmask) tmp21 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tmp1 - tmp6 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp3 - tmp6 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp15 = tmp5 - tmp6 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 + tmp18 tmp22 = tmp20 + tmp21 tmp23 = tmp19 - tmp22 tl.store(in_out_ptr0 + x0, tmp23, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, primals_2, primals_3, buf0, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(buf1, (4, 16, 1), (16, 1, 0), 0), out=buf2) del buf0 buf3 = empty_strided_cuda((4, 3, 4, 1), (12, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(48)](primals_3, primals_2, buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((1, 12, 4), (48, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (1, 12, 4), (0, 4, 1), 0), reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(48)](primals_3, primals_2, buf5, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf6 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 3)](buf4, buf6, 16, 3, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf4 buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 12), (12, 0, 1), 0), reinterpret_tensor(buf6, (4, 12, 1), (12, 1, 0), 0), out=buf7) del buf6 buf8 = buf1 del buf1 triton_poi_fused_mul_4[grid(64)](primals_1, primals_2, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf11 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 triton_poi_fused_add_exp_max_mul_rsub_sub_sum_5[grid(16)](buf11, buf8, primals_4, primals_2, primals_1, 16, XBLOCK=16, num_warps =1, num_stages=1) buf13 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0) del buf13 triton_poi_fused_add_exp_max_mul_rsub_sub_sum_6[grid(16)](buf14, buf11, primals_4, primals_2, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf11, (4, 1, 4), (4, 16, 1), 0) del buf11 buf17 = reinterpret_tensor(buf16, (4, 4), (4, 1), 0) del buf16 triton_poi_fused_add_exp_max_mul_rsub_sub_sum_7[grid(16)](buf17, buf14, primals_4, primals_2, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf14 buf18 = reinterpret_tensor(buf2, (4,), (1,), 0) del buf2 triton_poi_fused_add_sub_8[grid(4)](buf18, buf17, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf17 del buf7 return buf18, primals_1, primals_2, primals_4, reinterpret_tensor(buf8, (4, 4, 1), (16, 1, 1), 0), reinterpret_tensor(buf5, (4, 12, 1), (12, 1, 12), 0), reinterpret_tensor(buf3, (1, 4, 12), (48, 1, 4), 0) class CRFLayerNew(nn.Module): """ """ def __init__(self, output_dim): super(CRFLayerNew, self).__init__() self.output_dim = output_dim self.trans = nn.Parameter(torch.Tensor(output_dim, output_dim)) self.trans.data.uniform_(-0.1, 0.1) def compute_loss(self, y_pred, y_true, mask): """ Compute the CRF loss. """ y_pred = y_pred * mask y_true = y_true * mask target_score = self.target_score(y_pred, y_true) log_norm = self.log_norm_step(y_pred, mask) log_norm = self.logsumexp(log_norm, dim=1) return log_norm - target_score def target_score(self, y_pred, y_true): """ Compute the emission (state-label) score plus the transition score. y_true: (batch, seq_len, out_dim) y_pred: (batch, seq_len, out_dim) """ point_score = torch.einsum('bni,bni->b', y_pred, y_true) trans_score = torch.einsum('bni,ij,bnj->b', y_true[:, :-1], self.
trans, y_true[:, 1:]) return point_score + trans_score def log_norm_step(self, y_pred, mask): """ Compute the normalization factor Z(X) (the partition function). """ state = y_pred[:, 0] y_pred = y_pred[:, 1:].contiguous() mask = mask[:, 1:].contiguous() _batch, seq_len, _out_dim = y_pred.shape for t in range(seq_len): cur_mask = mask[:, t] state = torch.unsqueeze(state, 2) g = torch.unsqueeze(self.trans, 0) outputs = self.logsumexp(state + g, dim=1) outputs = outputs + y_pred[:, t] outputs = cur_mask * outputs + (1 - cur_mask) * state.squeeze(-1) state = outputs return outputs def logsumexp(self, x, dim=None, keepdim=False): """ Numerically stable log-sum-exp, to avoid overflow. """ if dim is None: x, dim = x.view(-1), 0 xm, _ = torch.max(x, dim, keepdim=True) out = xm + torch.log(torch.sum(torch.exp(x - xm), dim=dim, keepdim= True)) return out if keepdim else out.squeeze(dim) def forward(self, input_0, input_1, input_2): primals_3 = self.trans primals_1 = input_0 primals_4 = input_1 primals_2 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
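A direct-invocation sketch for the compiled call() above (assumption: a CUDA device is available). call() asserts exact sizes and strides and clears the argument list it receives, so it must be fed fresh, contiguous CUDA tensors in the traced shapes:

import torch

args = [torch.rand(4, 4, 4, device='cuda'),  # primals_1, strides (16, 4, 1)
        torch.rand(4, 4, 1, device='cuda'),  # primals_2
        torch.rand(4, 4, device='cuda'),     # primals_3
        torch.rand(4, 4, device='cuda')]     # primals_4
out = call(args)[0]  # first element is the loss buffer; the rest are tensors saved for backward
print(out.shape)     # torch.Size([4])
print(len(args))     # 0 -- call() empties the list it was given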
Elvisambition/bert_seq2seq
CRFLayer
false
5178
[ "Apache-2.0" ]
1
643ac537c16872f0d13200de06001d8201a54fbb
https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb
import torch import torch.nn.functional as F import torch.nn as nn import torch.utils.checkpoint class Model(nn.Module): """ """ def __init__(self, output_dim): super().__init__() self.output_dim = output_dim self.trans = nn.Parameter(torch.Tensor(output_dim, output_dim)) self.trans.data.uniform_(-0.1, 0.1) def compute_loss(self, y_pred, y_true, mask): """ Compute the CRF loss. """ y_pred = y_pred * mask y_true = y_true * mask target_score = self.target_score(y_pred, y_true) log_norm = self.log_norm_step(y_pred, mask) log_norm = self.logsumexp(log_norm, dim=1) return log_norm - target_score def forward(self, y_pred, y_true, mask): """ y_true: [[1, 2, 3], [2, 3, 0] ] mask: [[1, 1, 1], [1, 1, 0]] """ if y_pred.shape[0] != mask.shape[0] or y_pred.shape[1] != mask.shape[1 ]: raise Exception('mask shape does not match y_pred shape') mask = mask.reshape((mask.shape[0], mask.shape[1], 1)) mask = mask.float() y_true = y_true.reshape(y_pred.shape[:-1]) y_true = y_true.long() y_true_onehot = F.one_hot(y_true, self.output_dim) y_true_onehot = y_true_onehot.float() return self.compute_loss(y_pred, y_true_onehot, mask) def target_score(self, y_pred, y_true): """ Compute the emission (state-label) score plus the transition score. y_true: (batch, seq_len, out_dim) y_pred: (batch, seq_len, out_dim) """ point_score = torch.einsum('bni,bni->b', y_pred, y_true) trans_score = torch.einsum('bni,ij,bnj->b', y_true[:, :-1], self. trans, y_true[:, 1:]) return point_score + trans_score def log_norm_step(self, y_pred, mask): """ Compute the normalization factor Z(X) (the partition function). """ state = y_pred[:, 0] y_pred = y_pred[:, 1:].contiguous() mask = mask[:, 1:].contiguous() _batch, seq_len, _out_dim = y_pred.shape for t in range(seq_len): cur_mask = mask[:, t] state = torch.unsqueeze(state, 2) g = torch.unsqueeze(self.trans, 0) outputs = self.logsumexp(state + g, dim=1) outputs = outputs + y_pred[:, t] outputs = cur_mask * outputs + (1 - cur_mask) * state.squeeze(-1) state = outputs return outputs def logsumexp(self, x, dim=None, keepdim=False): """ Numerically stable log-sum-exp, to avoid overflow. """ if dim is None: x, dim = x.view(-1), 0 xm, _ = torch.max(x, dim, keepdim=True) out = xm + torch.log(torch.sum(torch.exp(x - xm), dim=dim, keepdim= True)) return out if keepdim else out.squeeze(dim) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 1])] def get_init_inputs(): return [4]
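Since log Z(x) is just the log-sum-exp of every tag path's score, log_norm_step can be checked by brute force on a tiny instance. A sketch with illustrative sizes (CPU only; a small tag set keeps the 3^4 = 81 paths enumerable):

import itertools
import torch

torch.manual_seed(0)
layer = Model(output_dim=3)
y_pred = torch.randn(1, 4, 3)  # (batch=1, seq_len=4, num_tags=3)
mask = torch.ones(1, 4, 1)

log_norm = layer.logsumexp(layer.log_norm_step(y_pred, mask), dim=1)

scores = []  # score(path) = sum of emissions + sum of transitions
for path in itertools.product(range(3), repeat=4):
    s = sum(y_pred[0, t, k] for t, k in enumerate(path))
    s = s + sum(layer.trans[a, b] for a, b in zip(path, path[1:]))
    scores.append(s)
brute = torch.logsumexp(torch.stack(scores), dim=0)
print(torch.allclose(log_norm.squeeze(), brute, atol=1e-5))  # expected: True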
GaussianCriticNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [phi], Original ATen: [aten.tanh] # Source node to ATen node mapping: # phi => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (1, 64), (64, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [phi], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [phi_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_7 return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
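benchmark_compiled_module above delegates timing to the internal print_performance helper. A hedged, self-contained alternative (assumes a CUDA device; warm-up and iteration counts are arbitrary choices) that times any zero-argument callable with CUDA events:

import torch

def time_ms(fn, warmup: int = 10, iters: int = 100) -> float:
    # Warm up first so autotuning and cache effects are excluded.
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / iters  # mean milliseconds per call

Note that call() empties its argument list (args.clear()), so the callable must rebuild the list on every invocation, exactly as the lambda in benchmark_compiled_module does.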
import torch import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class GaussianCriticNet(nn.Module, BasicNet): def __init__(self, state_dim, gpu=False, hidden_size=64): super(GaussianCriticNet, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc_value = nn.Linear(hidden_size, 1) BasicNet.__init__(self, None, gpu, False) def forward(self, x): x = self.to_torch_variable(x) phi = F.tanh(self.fc1(x)) phi = F.tanh(self.fc2(phi)) value = self.fc_value(phi) return value def predict(self, x): return self.forward(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (1, 64), (64, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_3, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class GaussianCriticNetNew(nn.Module, BasicNet): def __init__(self, state_dim, gpu=False, hidden_size=64): super(GaussianCriticNetNew, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) 
self.fc_value = nn.Linear(hidden_size, 1) BasicNet.__init__(self, None, gpu, False) def predict(self, x): return self.forward(x) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc_value.weight primals_7 = self.fc_value.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
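A hedged sanity check (assumes a CUDA device; the tolerance is a loose float32 choice): the compiled call() wired up by GaussianCriticNetNew.forward should reproduce the eager computation value = fc_value(tanh(fc2(tanh(fc1(x))))) using the same parameters.

import torch

net = GaussianCriticNetNew(state_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = net(x)                                   # inductor-compiled path
ref = net.fc_value(torch.tanh(net.fc2(torch.tanh(net.fc1(x)))))
print(out.shape)                               # torch.Size([4, 4, 4, 1])
print(torch.allclose(out, ref, atol=1e-5))     # True up to kernel rounding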
G-Flor/deeprl
GaussianCriticNet
false
5179
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
import torch import numpy as np from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim class BasicNet: def __init__(self, optimizer_fn, gpu, LSTM=False): self.gpu = gpu and torch.cuda.is_available() self.LSTM = LSTM if self.gpu: self self.FloatTensor = torch.FloatTensor else: self.FloatTensor = torch.FloatTensor def to_torch_variable(self, x, dtype='float32'): if isinstance(x, Variable): return x if not isinstance(x, torch.FloatTensor): x = torch.from_numpy(np.asarray(x, dtype=dtype)) if self.gpu: x = x return Variable(x) def reset(self, terminal): if not self.LSTM: return if terminal: self.h.data.zero_() self.c.data.zero_() self.h = Variable(self.h.data) self.c = Variable(self.c.data) class Model(nn.Module, BasicNet): def __init__(self, state_dim, gpu=False, hidden_size=64): super().__init__() self.fc1 = nn.Linear(state_dim, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc_value = nn.Linear(hidden_size, 1) BasicNet.__init__(self, None, gpu, False) def forward(self, x): x = self.to_torch_variable(x) phi = F.tanh(self.fc1(x)) phi = F.tanh(self.fc2(phi)) value = self.fc_value(phi) return value def predict(self, x): return self.forward(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
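One side note on the eager model above: torch.nn.functional.tanh has been deprecated for a long time in favor of torch.tanh. A minimal equivalent critic (hypothetical, not part of the record) written the modern way:

import torch
import torch.nn as nn

class Critic(nn.Module):
    """Hypothetical torch.tanh rewrite of the Model above; same math."""

    def __init__(self, state_dim: int, hidden_size: int = 64):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc_value = nn.Linear(hidden_size, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        phi = torch.tanh(self.fc1(x))
        phi = torch.tanh(self.fc2(phi))
        return self.fc_value(phi)  # one scalar value estimate per state

print(Critic(4)(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 1])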
ConditionalRandomField
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5m/c5myotvw3pdemp5pfmlym472pjclzco6isrgqghjub4i7ol7yrkn.py # Topologically Sorted Source Nodes: [add, tmp, max_1, sub, exp, sum_1], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum] # Source node to ATen node mapping: # add => add # exp => exp # max_1 => max_1 # sub => sub # sum_1 => sum_1 # tmp => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %view), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %view_1), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1, True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused_add_exp_max_sub_sum_0 = async_compile.triton('triton_poi_fused_add_exp_max_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_max_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + (x2), tmp19, xmask) tl.store(out_ptr1 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7f/c7fumiwtt6kidxj2ourt5ztjgb66maxodqoobflayhv3vaf4yzwz.py # Topologically Sorted Source Nodes: [add_4, tmp_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_4 => add_4 # tmp_1 => add_5 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %view_5), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %view_1), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x2 = (xindex // 16) x1 = (xindex // 4) % 4 x0 = xindex % 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp4 tmp9 = tmp6 * tmp8 tmp10 = tmp5 + tmp9 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xb/cxbjkkwpz3o6faa425lkz7ibnew4joeb34s3s2vlwo6ps5yzcqik.py # Topologically Sorted Source Nodes: [mul, mul_1, alpha_1, mul_2, mul_3, alpha_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # alpha_1 => add_3 # alpha_2 => add_7 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %view_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %view_4), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %view_8), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %view_9), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp20 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (x2), xmask) tmp24 = tl.load(in_out_ptr0 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (x0 + (16*x1)), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tmp1 - tmp6 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp3 - tmp6 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp15 = tmp5 - tmp6 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp18 + tmp6 tmp21 = tmp19 * tmp20 tmp23 = tl_math.log(tmp22) tmp25 = tmp23 + tmp24 tmp27 = tmp25 * tmp26 tmp29 = 1.0 tmp30 = tmp29 - tmp26 tmp31 = tmp28 * tmp30 tmp32 = tmp27 + tmp31 tmp33 = tmp29 - tmp20 tmp34 = tmp32 * tmp33 tmp35 = tmp21 + tmp34 tl.store(in_out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ue/cuen3hq2nipfticixxtzr3g7g3plhadomql2lcdufeljpmsl3i34.py # Topologically Sorted Source Nodes: [add_8, tmp_2, max_3, sub_4, exp_2, sum_3], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum] # Source node to ATen node mapping: # add_8 => add_8 # exp_2 => exp_2 # max_3 => max_3 # sub_4 => sub_4 # sum_3 => sum_3 # tmp_2 => add_9 # Graph fragment: # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_12, %view_10), kwargs = {}) # %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %view_1), kwargs = {}) # %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_9, 1, True), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %getitem_4), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) triton_poi_fused_add_exp_max_sub_sum_3 = async_compile.triton('triton_poi_fused_add_exp_max_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_max_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + (x2), tmp19, xmask) tl.store(out_ptr1 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6y/c6y3cpr5dht4m6qhclw6j7ib73qcctr2lilhvjlb7rzrtzfibocs.py # Topologically Sorted Source Nodes: [tags], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # tags => convert_element_type # Graph fragment: # %convert_element_type : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%permute_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_4 = async_compile.triton('triton_poi_fused__to_copy_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 
4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + (x1 + (4*y0)), tmp1, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ge/cge7ucglxa3nobf36fyehqsf3q4xpfp6y3tdnycfaogkmxiauodw.py # Topologically Sorted Source Nodes: [getitem_19], Original ATen: [aten.slice] # Source node to ATen node mapping: # getitem_19 => slice_2 # Graph fragment: # %slice_2 : [num_users=2] = call_function[target=torch.ops.aten.slice.Tensor](args = (%convert_element_type, 0, 1, 9223372036854775807), kwargs = {}) triton_poi_fused_slice_5 = async_compile.triton('triton_poi_fused_slice_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_slice_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_slice_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : 
tl.constexpr, XBLOCK : tl.constexpr): ynumel = 3 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 + x1 + (4*y0)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nn/cnnyf7lx5frntsngzjjvmmzmxwoqgf5cmldad4ifa62vlbjvqcbk.py # Topologically Sorted Source Nodes: [tags, getitem_18], Original ATen: [aten._to_copy, aten.slice] # Source node to ATen node mapping: # getitem_18 => slice_1 # tags => convert_element_type # Graph fragment: # %convert_element_type : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%permute_1, torch.int64), kwargs = {}) # %slice_1 : [num_users=2] = call_function[target=torch.ops.aten.slice.Tensor](args = (%convert_element_type, 0, 0, 3), kwargs = {}) triton_poi_fused__to_copy_slice_6 = async_compile.triton('triton_poi_fused__to_copy_slice_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_slice_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_slice_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 3 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6j/c6jtxb5inl36t7vk4uqycgihmocdrrsdlwqwayiqclhsbf2u54zk.py # Topologically Sorted Source Nodes: [mul_4, mul_5, alpha_3, max_4, sub_6, exp_3, sum_4, getitem_20, trans_score_3, score, sum_5, mul_8, score_1, sub_7], Original ATen: [aten.mul, aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.index] # Source node to ATen node mapping: # alpha_3 => 
add_11 # exp_3 => exp_3 # getitem_20 => index # max_4 => max_4 # mul_4 => mul_4 # mul_5 => mul_5 # mul_8 => mul_8 # score => add_13 # score_1 => add_14 # sub_6 => sub_6 # sub_7 => sub_7 # sum_4 => sum_4 # sum_5 => sum_5 # trans_score_3 => mul_6 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, %view_13), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %view_14), kwargs = {}) # %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_11, 1, True), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %getitem_6), kwargs = {}) # %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_4, [%slice_1, %slice_2]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %slice_3), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %slice_5), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_13, [0]), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, %select_11), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, %mul_8), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_3, %add_14), kwargs = {}) triton_poi_fused_add_exp_index_max_mul_sub_sum_7 = async_compile.triton('triton_poi_fused_add_exp_index_max_mul_sub_sum_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_index_max_mul_sub_sum_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 26, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_index_max_mul_sub_sum_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), xmask) tmp20 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (1 + (3*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr4 + (4 + x0), xmask) tmp45 = tl.load(in_ptr0 + (2 + (3*x0)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr1 + (2 + (3*x0)), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp58 = tl.load(in_ptr4 + (8 + x0), xmask) tmp67 = tl.load(in_ptr6 + (4*x0), xmask, eviction_policy='evict_last') tmp69 = tl.load(in_ptr7 + (4*x0), xmask, eviction_policy='evict_last') tmp72 = tl.load(in_ptr8 + (4*x0), xmask, eviction_policy='evict_last') tmp77 = tl.load(in_ptr6 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr7 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp82 = tl.load(in_ptr8 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp86 = tl.load(in_ptr6 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp88 = tl.load(in_ptr7 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp91 = tl.load(in_ptr8 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp95 = tl.load(in_ptr6 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp97 = tl.load(in_ptr7 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp100 = tl.load(in_ptr8 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp117 = tl.load(in_ptr4 + (12 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp7 = tmp6 + tmp1 tmp8 = tmp6 < 0 tmp9 = tl.where(tmp8, tmp7, tmp6) tl.device_assert(((0 <= tmp9) & (tmp9 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp9 < 4") tmp11 = tl.load(in_ptr2 + (tmp9 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp13 = tmp11 * tmp12 tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tl.device_assert(((0 <= tmp17) & (tmp17 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp17 < 4") tmp19 = tl.load(in_ptr5 + (tmp17 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tmp19 * tmp20 tmp22 = tmp13 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tl.device_assert(((0 <= tmp26) & (tmp26 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp26 < 4") tmp29 = tmp28 + tmp1 tmp30 = tmp28 < 0 tmp31 = tl.where(tmp30, tmp29, tmp28) tl.device_assert(((0 <= tmp31) & (tmp31 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 4") tmp33 = tl.load(in_ptr2 + (tmp31 + (4*tmp26)), xmask, eviction_policy='evict_last') tmp35 = tmp33 * tmp34 tmp37 = tmp36 + tmp1 tmp38 = tmp36 < 0 tmp39 = tl.where(tmp38, tmp37, tmp36) tl.device_assert(((0 <= tmp39) & (tmp39 < 4)) | ~(xmask), "index out of bounds: 0 <= 
tmp39 < 4") tmp41 = tl.load(in_ptr5 + (4 + tmp39 + (16*x0)), xmask, eviction_policy='evict_last') tmp42 = tmp41 * tmp12 tmp43 = tmp35 + tmp42 tmp44 = tmp22 + tmp43 tmp46 = tmp45 + tmp1 tmp47 = tmp45 < 0 tmp48 = tl.where(tmp47, tmp46, tmp45) tl.device_assert(((0 <= tmp48) & (tmp48 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp48 < 4") tmp51 = tmp50 + tmp1 tmp52 = tmp50 < 0 tmp53 = tl.where(tmp52, tmp51, tmp50) tl.device_assert(((0 <= tmp53) & (tmp53 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp53 < 4") tmp55 = tl.load(in_ptr2 + (tmp53 + (4*tmp48)), xmask, eviction_policy='evict_last') tmp57 = tmp55 * tmp56 tmp59 = tmp58 + tmp1 tmp60 = tmp58 < 0 tmp61 = tl.where(tmp60, tmp59, tmp58) tl.device_assert(((0 <= tmp61) & (tmp61 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp61 < 4") tmp63 = tl.load(in_ptr5 + (8 + tmp61 + (16*x0)), xmask, eviction_policy='evict_last') tmp64 = tmp63 * tmp34 tmp65 = tmp57 + tmp64 tmp66 = tmp44 + tmp65 tmp68 = tl_math.log(tmp67) tmp70 = tmp68 + tmp69 tmp71 = tmp70 * tmp56 tmp73 = 1.0 tmp74 = tmp73 - tmp56 tmp75 = tmp72 * tmp74 tmp76 = tmp71 + tmp75 tmp78 = tl_math.log(tmp77) tmp80 = tmp78 + tmp79 tmp81 = tmp80 * tmp56 tmp83 = tmp82 * tmp74 tmp84 = tmp81 + tmp83 tmp85 = triton_helpers.maximum(tmp76, tmp84) tmp87 = tl_math.log(tmp86) tmp89 = tmp87 + tmp88 tmp90 = tmp89 * tmp56 tmp92 = tmp91 * tmp74 tmp93 = tmp90 + tmp92 tmp94 = triton_helpers.maximum(tmp85, tmp93) tmp96 = tl_math.log(tmp95) tmp98 = tmp96 + tmp97 tmp99 = tmp98 * tmp56 tmp101 = tmp100 * tmp74 tmp102 = tmp99 + tmp101 tmp103 = triton_helpers.maximum(tmp94, tmp102) tmp104 = tmp76 - tmp103 tmp105 = tl_math.exp(tmp104) tmp106 = tmp84 - tmp103 tmp107 = tl_math.exp(tmp106) tmp108 = tmp105 + tmp107 tmp109 = tmp93 - tmp103 tmp110 = tl_math.exp(tmp109) tmp111 = tmp108 + tmp110 tmp112 = tmp102 - tmp103 tmp113 = tl_math.exp(tmp112) tmp114 = tmp111 + tmp113 tmp115 = tl_math.log(tmp114) tmp116 = tmp115 + tmp103 tmp118 = tmp117 + tmp1 tmp119 = tmp117 < 0 tmp120 = tl.where(tmp119, tmp118, tmp117) tl.device_assert(((0 <= tmp120) & (tmp120 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp120 < 4") tmp122 = tl.load(in_ptr5 + (12 + tmp120 + (16*x0)), xmask, eviction_policy='evict_last') tmp123 = tmp122 * tmp56 tmp124 = tmp123 * tmp56 tmp125 = tmp66 + tmp124 tmp126 = tmp116 - tmp125 tl.store(in_out_ptr0 + (x0), tmp126, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [add, tmp, max_1, sub, exp, sum_1], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_add_exp_max_sub_sum_0.run(primals_1, primals_4, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_4, tmp_1], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf1, buf0, primals_3, primals_1, primals_4, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul, mul_1, alpha_1, mul_2, mul_3, alpha_2], 
Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_2.run(buf3, buf2, primals_3, buf1, primals_1, 16, grid=grid(16), stream=stream0) del buf2 buf4 = buf1; del buf1 # reuse buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [add_8, tmp_2, max_3, sub_4, exp_2, sum_3], Original ATen: [aten.add, aten.max, aten.sub, aten.exp, aten.sum] triton_poi_fused_add_exp_max_sub_sum_3.run(buf3, primals_1, primals_4, buf4, buf5, 16, grid=grid(16), stream=stream0) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.int64) # Topologically Sorted Source Nodes: [tags], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_4.run(primals_2, buf8, 4, 4, grid=grid(4, 4), stream=stream0) del primals_2 buf10 = empty_strided_cuda((3, 4), (1, 3), torch.int64) # Topologically Sorted Source Nodes: [getitem_19], Original ATen: [aten.slice] triton_poi_fused_slice_5.run(buf8, buf10, 3, 4, grid=grid(3, 4), stream=stream0) buf9 = empty_strided_cuda((3, 4), (1, 3), torch.int64) # Topologically Sorted Source Nodes: [tags, getitem_18], Original ATen: [aten._to_copy, aten.slice] triton_poi_fused__to_copy_slice_6.run(buf8, buf9, 3, 4, grid=grid(3, 4), stream=stream0) buf11 = empty_strided_cuda((4, ), (1, ), torch.float32) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [mul_4, mul_5, alpha_3, max_4, sub_6, exp_3, sum_4, getitem_20, trans_score_3, score, sum_5, mul_8, score_1, sub_7], Original ATen: [aten.mul, aten.add, aten.max, aten.sub, aten.exp, aten.sum, aten.index] triton_poi_fused_add_exp_index_max_mul_sub_sum_7.run(buf12, buf9, buf10, primals_4, primals_3, buf8, primals_1, buf5, buf4, buf3, 4, grid=grid(4), stream=stream0) del buf3 del buf4 del buf5 del buf8 return (buf12, primals_1, primals_3, primals_4, buf9, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) def log_sum_exp(x, dim=-1): max_value, _ = x.max(dim=dim, keepdim=True) res = torch.log(torch.sum(torch.exp(x - max_value), dim=dim, keepdim=True) ) + max_value return res.squeeze(dim) class ConditionalRandomField(nn.Module): def __init__(self, tag_size, include_start_end_trans=False, initial_method=None): """ :param tag_size: int, num of tags :param include_start_end_trans: bool, whether to include start/end tag """ super(ConditionalRandomField, self).__init__() self.include_start_end_trans = include_start_end_trans self.tag_size = tag_size self.trans_m = nn.Parameter(torch.randn(tag_size, tag_size)) if self.include_start_end_trans: self.start_scores = nn.Parameter(torch.randn(tag_size)) self.end_scores = nn.Parameter(torch.randn(tag_size)) initial_parameter(self, initial_method) def reset_parameter(self): nn.init.xavier_normal_(self.trans_m) if self.include_start_end_trans: nn.init.normal_(self.start_scores) nn.init.normal_(self.end_scores) def _normalizer_likelihood(self, logits, mask): """ Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences. :param logits:FloatTensor, max_len x batch_size x tag_size :param mask:ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, n_tags = logits.size() alpha = logits[0] if self.include_start_end_trans: alpha += self.start_scores.view(1, -1) for i in range(1, seq_len): emit_score = logits[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags) tmp = alpha.view(batch_size, n_tags, 1) + emit_score + trans_score alpha = log_sum_exp(tmp, 1) * mask[i].view(batch_size, 1 ) + alpha * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: alpha += self.end_scores.view(1, -1) return log_sum_exp(alpha, 1) def _glod_score(self, logits, tags, mask): """ Compute the score for the gold path. 
:param logits: FloatTensor, max_len x batch_size x tag_size :param tags: LongTensor, max_len x batch_size :param mask: ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, _ = logits.size() batch_idx = torch.arange(batch_size, dtype=torch.long, device= logits.device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=logits.device) trans_score = self.trans_m[tags[:seq_len - 1], tags[1:]] * mask[1:, :] emit_score = logits[seq_idx.view(-1, 1), batch_idx.view(1, -1), tags ] * mask score = trans_score + emit_score[:seq_len - 1, :] score = score.sum(0) + emit_score[-1] * mask[-1] if self.include_start_end_trans: st_scores = self.start_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[0]] last_idx = mask.long().sum(0) - 1 ed_scores = self.end_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[last_idx, batch_idx]] score += st_scores + ed_scores return score def forward(self, feats, tags, mask): """ Calculate the neg log likelihood :param feats:FloatTensor, batch_size x max_len x tag_size :param tags:LongTensor, batch_size x max_len :param mask:ByteTensor batch_size x max_len :return:FloatTensor, batch_size """ feats = feats.transpose(0, 1) tags = tags.transpose(0, 1).long() mask = mask.transpose(0, 1).float() all_path_score = self._normalizer_likelihood(feats, mask) gold_path_score = self._glod_score(feats, tags, mask) return all_path_score - gold_path_score def viterbi_decode(self, data, mask, get_score=False): """ Given a feats matrix, return best decode path and best score. :param data:FloatTensor, batch_size x max_len x tag_size :param mask:ByteTensor batch_size x max_len :param get_score: bool, whether to output the decode score. :return: scores, paths """ batch_size, seq_len, n_tags = data.size() data = data.transpose(0, 1).data mask = mask.transpose(0, 1).data.float() vpath = data.new_zeros((seq_len, batch_size, n_tags), dtype=torch.long) vscore = data[0] if self.include_start_end_trans: vscore += self.start_scores.view(1, -1) for i in range(1, seq_len): prev_score = vscore.view(batch_size, n_tags, 1) cur_score = data[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags).data score = prev_score + trans_score + cur_score best_score, best_dst = score.max(1) vpath[i] = best_dst vscore = best_score * mask[i].view(batch_size, 1) + vscore * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: vscore += self.end_scores.view(1, -1) batch_idx = torch.arange(batch_size, dtype=torch.long, device=data. device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=data.device) lens = mask.long().sum(0) - 1 idxes = (lens.view(1, -1) - seq_idx.view(-1, 1)) % seq_len ans = data.new_empty((seq_len, batch_size), dtype=torch.long) ans_score, last_tags = vscore.max(1) ans[idxes[0], batch_idx] = last_tags for i in range(seq_len - 1): last_tags = vpath[idxes[i], batch_idx, last_tags] ans[idxes[i + 1], batch_idx] = last_tags if get_score: return ans_score, ans.transpose(0, 1) return ans.transpose(0, 1) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'tag_size': 4}]
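A hedged usage sketch (not part of the record) exercising both entry points of ConditionalRandomField with the shapes from get_inputs(). _normalizer_likelihood implements the forward recursion alpha_t(j) = logsumexp_i(alpha_{t-1}(i) + trans(i, j)) + emit_t(j), and forward returns that log-partition minus the gold-path score; viterbi_decode replaces the logsumexp with a max to recover the best path.

import torch

crf = ConditionalRandomField(tag_size=4)
feats = torch.rand(4, 4, 4)              # (batch_size, max_len, tag_size) emissions
tags = torch.randint(0, 4, (4, 4))       # gold tag ids
mask = torch.ones(4, 4)                  # 1 = real token, 0 = padding
nll = crf(feats, tags, mask)             # per-sample negative log-likelihood
paths = crf.viterbi_decode(feats, mask)  # best tag sequence per sample
print(nll.shape, paths.shape)            # torch.Size([4]) torch.Size([4, 4])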
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_exp_max_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x2 = xindex // 16 x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr3 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 * tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp4 tmp9 = tmp6 * tmp8 tmp10 = tmp5 + tmp9 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) 
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + x2, xmask) tmp24 = tl.load(in_out_ptr0 + x2, xmask) tmp26 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr3 + (x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tmp1 - tmp6 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp3 - tmp6 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp15 = tmp5 - tmp6 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp18 + tmp6 tmp21 = tmp19 * tmp20 tmp23 = tl_math.log(tmp22) tmp25 = tmp23 + tmp24 tmp27 = tmp25 * tmp26 tmp29 = 1.0 tmp30 = tmp29 - tmp26 tmp31 = tmp28 * tmp30 tmp32 = tmp27 + tmp31 tmp33 = tmp29 - tmp20 tmp34 = tmp32 * tmp33 tmp35 = tmp21 + tmp34 tl.store(in_out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_add_exp_max_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__to_copy_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + (x1 + 4 * y0), tmp1, xmask & ymask) @triton.jit def triton_poi_fused_slice_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 3 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 + x1 + 4 * y0), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__to_copy_slice_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 3 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_exp_index_max_mul_sub_sum_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr4 + x0, xmask) tmp20 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr4 + (4 + x0), xmask) tmp45 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr1 + (2 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp56 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp58 = tl.load(in_ptr4 + (8 + x0), xmask) tmp67 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last') tmp69 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last') tmp72 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last') tmp77 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp79 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp82 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp88 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp91 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp95 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp97 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp100 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp117 = tl.load(in_ptr4 + (12 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp7 = tmp6 + tmp1 tmp8 = tmp6 < 0 tmp9 = tl.where(tmp8, tmp7, tmp6) tl.device_assert((0 <= tmp9) & (tmp9 < 4) | ~xmask, 'index out of bounds: 0 <= tmp9 < 4') tmp11 = tl.load(in_ptr2 + (tmp9 + 4 * tmp4), xmask, 
eviction_policy= 'evict_last') tmp13 = tmp11 * tmp12 tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tl.device_assert((0 <= tmp17) & (tmp17 < 4) | ~xmask, 'index out of bounds: 0 <= tmp17 < 4') tmp19 = tl.load(in_ptr5 + (tmp17 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tmp19 * tmp20 tmp22 = tmp13 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tl.device_assert((0 <= tmp26) & (tmp26 < 4) | ~xmask, 'index out of bounds: 0 <= tmp26 < 4') tmp29 = tmp28 + tmp1 tmp30 = tmp28 < 0 tmp31 = tl.where(tmp30, tmp29, tmp28) tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask, 'index out of bounds: 0 <= tmp31 < 4') tmp33 = tl.load(in_ptr2 + (tmp31 + 4 * tmp26), xmask, eviction_policy= 'evict_last') tmp35 = tmp33 * tmp34 tmp37 = tmp36 + tmp1 tmp38 = tmp36 < 0 tmp39 = tl.where(tmp38, tmp37, tmp36) tl.device_assert((0 <= tmp39) & (tmp39 < 4) | ~xmask, 'index out of bounds: 0 <= tmp39 < 4') tmp41 = tl.load(in_ptr5 + (4 + tmp39 + 16 * x0), xmask, eviction_policy ='evict_last') tmp42 = tmp41 * tmp12 tmp43 = tmp35 + tmp42 tmp44 = tmp22 + tmp43 tmp46 = tmp45 + tmp1 tmp47 = tmp45 < 0 tmp48 = tl.where(tmp47, tmp46, tmp45) tl.device_assert((0 <= tmp48) & (tmp48 < 4) | ~xmask, 'index out of bounds: 0 <= tmp48 < 4') tmp51 = tmp50 + tmp1 tmp52 = tmp50 < 0 tmp53 = tl.where(tmp52, tmp51, tmp50) tl.device_assert((0 <= tmp53) & (tmp53 < 4) | ~xmask, 'index out of bounds: 0 <= tmp53 < 4') tmp55 = tl.load(in_ptr2 + (tmp53 + 4 * tmp48), xmask, eviction_policy= 'evict_last') tmp57 = tmp55 * tmp56 tmp59 = tmp58 + tmp1 tmp60 = tmp58 < 0 tmp61 = tl.where(tmp60, tmp59, tmp58) tl.device_assert((0 <= tmp61) & (tmp61 < 4) | ~xmask, 'index out of bounds: 0 <= tmp61 < 4') tmp63 = tl.load(in_ptr5 + (8 + tmp61 + 16 * x0), xmask, eviction_policy ='evict_last') tmp64 = tmp63 * tmp34 tmp65 = tmp57 + tmp64 tmp66 = tmp44 + tmp65 tmp68 = tl_math.log(tmp67) tmp70 = tmp68 + tmp69 tmp71 = tmp70 * tmp56 tmp73 = 1.0 tmp74 = tmp73 - tmp56 tmp75 = tmp72 * tmp74 tmp76 = tmp71 + tmp75 tmp78 = tl_math.log(tmp77) tmp80 = tmp78 + tmp79 tmp81 = tmp80 * tmp56 tmp83 = tmp82 * tmp74 tmp84 = tmp81 + tmp83 tmp85 = triton_helpers.maximum(tmp76, tmp84) tmp87 = tl_math.log(tmp86) tmp89 = tmp87 + tmp88 tmp90 = tmp89 * tmp56 tmp92 = tmp91 * tmp74 tmp93 = tmp90 + tmp92 tmp94 = triton_helpers.maximum(tmp85, tmp93) tmp96 = tl_math.log(tmp95) tmp98 = tmp96 + tmp97 tmp99 = tmp98 * tmp56 tmp101 = tmp100 * tmp74 tmp102 = tmp99 + tmp101 tmp103 = triton_helpers.maximum(tmp94, tmp102) tmp104 = tmp76 - tmp103 tmp105 = tl_math.exp(tmp104) tmp106 = tmp84 - tmp103 tmp107 = tl_math.exp(tmp106) tmp108 = tmp105 + tmp107 tmp109 = tmp93 - tmp103 tmp110 = tl_math.exp(tmp109) tmp111 = tmp108 + tmp110 tmp112 = tmp102 - tmp103 tmp113 = tl_math.exp(tmp112) tmp114 = tmp111 + tmp113 tmp115 = tl_math.log(tmp114) tmp116 = tmp115 + tmp103 tmp118 = tmp117 + tmp1 tmp119 = tmp117 < 0 tmp120 = tl.where(tmp119, tmp118, tmp117) tl.device_assert((0 <= tmp120) & (tmp120 < 4) | ~xmask, 'index out of bounds: 0 <= tmp120 < 4') tmp122 = tl.load(in_ptr5 + (12 + tmp120 + 16 * x0), xmask, eviction_policy='evict_last') tmp123 = tmp122 * tmp56 tmp124 = tmp123 * tmp56 tmp125 = tmp66 + tmp124 tmp126 = tmp116 - tmp125 tl.store(in_out_ptr0 + x0, tmp126, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_max_sub_sum_0[grid(16)](primals_1, primals_4, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_1[grid(64)](buf1, buf0, primals_3, primals_1, primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 triton_poi_fused_add_mul_2[grid(16)](buf3, buf2, primals_3, buf1, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 buf4 = buf1 del buf1 buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_poi_fused_add_exp_max_sub_sum_3[grid(16)](buf3, primals_1, primals_4, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused__to_copy_4[grid(4, 4)](primals_2, buf8, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf10 = empty_strided_cuda((3, 4), (1, 3), torch.int64) triton_poi_fused_slice_5[grid(3, 4)](buf8, buf10, 3, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((3, 4), (1, 3), torch.int64) triton_poi_fused__to_copy_slice_6[grid(3, 4)](buf8, buf9, 3, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4,), (1,), torch.float32) buf12 = buf11 del buf11 triton_poi_fused_add_exp_index_max_mul_sub_sum_7[grid(4)](buf12, buf9, buf10, primals_4, primals_3, buf8, primals_1, buf5, buf4, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf3 del buf4 del buf5 del buf8 return buf12, primals_1, primals_3, primals_4, buf9, buf10 def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. 
:param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) def log_sum_exp(x, dim=-1): max_value, _ = x.max(dim=dim, keepdim=True) res = torch.log(torch.sum(torch.exp(x - max_value), dim=dim, keepdim=True) ) + max_value return res.squeeze(dim) class ConditionalRandomFieldNew(nn.Module): def __init__(self, tag_size, include_start_end_trans=False, initial_method=None): """ :param tag_size: int, num of tags :param include_start_end_trans: bool, whether to include start/end tag """ super(ConditionalRandomFieldNew, self).__init__() self.include_start_end_trans = include_start_end_trans self.tag_size = tag_size self.trans_m = nn.Parameter(torch.randn(tag_size, tag_size)) if self.include_start_end_trans: self.start_scores = nn.Parameter(torch.randn(tag_size)) self.end_scores = nn.Parameter(torch.randn(tag_size)) initial_parameter(self, initial_method) def reset_parameter(self): nn.init.xavier_normal_(self.trans_m) if self.include_start_end_trans: nn.init.normal_(self.start_scores) nn.init.normal_(self.end_scores) def _normalizer_likelihood(self, logits, mask): """ Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences. :param logits:FloatTensor, max_len x batch_size x tag_size :param mask:ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, n_tags = logits.size() alpha = logits[0] if self.include_start_end_trans: alpha += self.start_scores.view(1, -1) for i in range(1, seq_len): emit_score = logits[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags) tmp = alpha.view(batch_size, n_tags, 1) + emit_score + trans_score alpha = log_sum_exp(tmp, 1) * mask[i].view(batch_size, 1 ) + alpha * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: alpha += self.end_scores.view(1, -1) return log_sum_exp(alpha, 1) def _glod_score(self, logits, tags, mask): """ Compute the score for the gold path. 
:param logits: FloatTensor, max_len x batch_size x tag_size :param tags: LongTensor, max_len x batch_size :param mask: ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, _ = logits.size() batch_idx = torch.arange(batch_size, dtype=torch.long, device= logits.device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=logits.device) trans_score = self.trans_m[tags[:seq_len - 1], tags[1:]] * mask[1:, :] emit_score = logits[seq_idx.view(-1, 1), batch_idx.view(1, -1), tags ] * mask score = trans_score + emit_score[:seq_len - 1, :] score = score.sum(0) + emit_score[-1] * mask[-1] if self.include_start_end_trans: st_scores = self.start_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[0]] last_idx = mask.long().sum(0) - 1 ed_scores = self.end_scores.view(1, -1).repeat(batch_size, 1)[ batch_idx, tags[last_idx, batch_idx]] score += st_scores + ed_scores return score def viterbi_decode(self, data, mask, get_score=False): """ Given a feats matrix, return best decode path and best score. :param data:FloatTensor, batch_size x max_len x tag_size :param mask:ByteTensor batch_size x max_len :param get_score: bool, whether to output the decode score. :return: scores, paths """ batch_size, seq_len, n_tags = data.size() data = data.transpose(0, 1).data mask = mask.transpose(0, 1).data.float() vpath = data.new_zeros((seq_len, batch_size, n_tags), dtype=torch.long) vscore = data[0] if self.include_start_end_trans: vscore += self.start_scores.view(1, -1) for i in range(1, seq_len): prev_score = vscore.view(batch_size, n_tags, 1) cur_score = data[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n_tags, n_tags).data score = prev_score + trans_score + cur_score best_score, best_dst = score.max(1) vpath[i] = best_dst vscore = best_score * mask[i].view(batch_size, 1) + vscore * (1 - mask[i]).view(batch_size, 1) if self.include_start_end_trans: vscore += self.end_scores.view(1, -1) batch_idx = torch.arange(batch_size, dtype=torch.long, device=data. device) seq_idx = torch.arange(seq_len, dtype=torch.long, device=data.device) lens = mask.long().sum(0) - 1 idxes = (lens.view(1, -1) - seq_idx.view(-1, 1)) % seq_len ans = data.new_empty((seq_len, batch_size), dtype=torch.long) ans_score, last_tags = vscore.max(1) ans[idxes[0], batch_idx] = last_tags for i in range(seq_len - 1): last_tags = vpath[idxes[i], batch_idx, last_tags] ans[idxes[i + 1], batch_idx] = last_tags if get_score: return ans_score, ans.transpose(0, 1) return ans.transpose(0, 1) def forward(self, input_0, input_1, input_2): primals_2 = self.trans_m primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
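# ---------------------------------------------------------------------------
# Editor's addition -- a hedged numeric sketch of the max/subtract/exp/sum
# pattern that the fused kernels above implement: log_sum_exp shifts by the
# row maximum before exponentiating, so it matches torch.logsumexp and stays
# finite where the naive log(sum(exp(x))) overflows.
if __name__ == '__main__':
    x = torch.tensor([[1000.0, 1000.0], [0.0, 1.0]])
    print(log_sum_exp(x, dim=-1))           # tensor([1000.6931,    1.3133])
    print(torch.logsumexp(x, dim=-1))       # identical reference values
    print(torch.log(torch.exp(x).sum(-1)))  # naive form: tensor([inf, 1.3133])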
FengZiYjun/fastNLP
ConditionalRandomField
false
5180
[ "Apache-2.0" ]
1
3ae73ab0a05d1ceef4a5181516891a8057d7f719
https://github.com/FengZiYjun/fastNLP/tree/3ae73ab0a05d1ceef4a5181516891a8057d7f719
import torch from torch import nn import torch.utils.data import torch.nn.init as init def initial_parameter(net, initial_method=None): """A method used to initialize the weights of PyTorch models. :param net: a PyTorch model :param initial_method: str, one of the following initializations - xavier_uniform - xavier_normal (default) - kaiming_normal, or msra - kaiming_uniform - orthogonal - sparse - normal - uniform """ if initial_method == 'xavier_uniform': init_method = init.xavier_uniform_ elif initial_method == 'xavier_normal': init_method = init.xavier_normal_ elif initial_method == 'kaiming_normal' or initial_method == 'msra': init_method = init.kaiming_normal_ elif initial_method == 'kaiming_uniform': init_method = init.kaiming_uniform_ elif initial_method == 'orthogonal': init_method = init.orthogonal_ elif initial_method == 'sparse': init_method = init.sparse_ elif initial_method == 'normal': init_method = init.normal_ elif initial_method == 'uniform': init_method = init.uniform_ else: init_method = init.xavier_normal_ def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m , nn.Conv3d): if initial_method is not None: init_method(m.weight.data) else: init.xavier_normal_(m.weight.data) init.normal_(m.bias.data) elif isinstance(m, nn.LSTM): for w in m.parameters(): if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) elif hasattr(m, 'weight') and m.weight.requires_grad: init_method(m.weight.data) else: for w in m.parameters(): if w.requires_grad: if len(w.data.size()) > 1: init_method(w.data) else: init.normal_(w.data) net.apply(weights_init) def log_sum_exp(x, dim=-1): max_value, _ = x.max(dim=dim, keepdim=True) res = torch.log(torch.sum(torch.exp(x - max_value), dim=dim, keepdim=True) ) + max_value return res.squeeze(dim) class Model(nn.Module): def __init__(self, tag_size, include_start_end_trans=False, initial_method=None): """ :param tag_size: int, num of tags :param include_start_end_trans: bool, whether to include start/end tag """ super().__init__() self.include_start_end_trans = include_start_end_trans self.tag_size = tag_size self.trans_m = nn.Parameter(torch.randn(tag_size, tag_size)) if self.include_start_end_trans: self.start_scores = nn.Parameter(torch.randn(tag_size)) self.end_scores = nn.Parameter(torch.randn(tag_size)) initial_parameter(self, initial_method) def reset_parameter(self): nn.init.xavier_normal_(self.trans_m) if self.include_start_end_trans: nn.init.normal_(self.start_scores) nn.init.normal_(self.end_scores) def _normalizer_likelihood(self, logits, mask): """ Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences. :param logits:FloatTensor, max_len x batch_size x tag_size :param mask:ByteTensor, max_len x batch_size :return:FloatTensor, batch_size """ seq_len, batch_size, n_tags = logits.size() alpha = logits[0] if self.include_start_end_trans: alpha += self.start_scores.view(1, -1) for i in range(1, seq_len): emit_score = logits[i].view(batch_size, 1, n_tags) trans_score = self.trans_m.view(1, n # ... truncated (>4000 chars) for memory efficiency
STFullyConnected
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5y/c5ycegys2ar7wz6epy2shzbmz2mkdkn4qmpsdbgrzhhu5ufhemkx.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 8000 x1 = (xindex // 8000) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x0 + (8064*x1)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2d/c2dldo522mnd74n46v2lrkgeutrjrvjind75q4sshb25ha2k2cdg.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4000 x1 = (xindex // 4000) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x0 + (4096*x1)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4o/c4o2m7ozfavldjjmvy54d5j4mjtrlcvp7qonixpmh5h2yro5ycts.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y_2 => relu_2 # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = 
(%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = (xindex // 2000) tmp0 = tl.load(in_out_ptr0 + (x0 + (2016*x1)), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0 + (2016*x1)), tmp4, xmask) tl.store(out_ptr0 + (x0 + (2048*x1)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ow/cowwhsc5cs2qznzwwjc3lfnep2kbdduqlytkv5dxjwuphmtmuvmg.py # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # y_3 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u7/cu7ffzplxrjnumdhgwnhmfftk3s6t7gc5vwgldniafzjdoyhvfef.py # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # y_3 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (8000, 4), (4, 1)) assert_size_stride(primals_2, (8000, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4000, 8000), (8000, 1)) assert_size_stride(primals_5, (4000, ), (1, )) assert_size_stride(primals_6, (2000, 4000), (4000, 1)) assert_size_stride(primals_7, (2000, ), (1, )) assert_size_stride(primals_8, (4, 2000), (2000, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0); del buf0 # reuse buf11 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1), torch.bool) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 512000, grid=grid(512000), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0); del buf2 # reuse buf10 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf10, 256000, grid=grid(256000), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0); del buf4 # reuse buf9 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf9, 128000, grid=grid(128000), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8000, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4000, 8000), (8000, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2000, 4000), (4000, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 2000), (2000, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
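# ---------------------------------------------------------------------------
# Editor's addition -- a hedged eager reference for the two fused softmax
# kernels above: triton_poi_fused__softmax_3 subtracts the per-slice maximum
# over dim 1 and exponentiates, and triton_poi_fused__softmax_4 normalizes by
# the sum, i.e. the standard two-pass numerically stable softmax.
def _two_pass_softmax(x, dim=1):
    shifted = x - x.amax(dim=dim, keepdim=True)  # pass 1 (kernel 3): max-shift
    e = shifted.exp()
    return e / e.sum(dim=dim, keepdim=True)      # pass 2 (kernel 4): normalize

if __name__ == "__main__":
    _x = torch.rand(4, 4, 4, 4)
    torch.testing.assert_close(_two_pass_softmax(_x), torch.softmax(_x, dim=1))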
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F


class Base(nn.Module):
    """This class is the base structure for all of the classification/regression
    DNN models. Mainly, it provides the general methods for training, evaluating
    the model and predicting the given data.
    """

    def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
        """Training the DNN model, similar to the scikit-learn or Keras style.
        In the end, the optimal value of parameters will also be persisted on
        the hard drive.

        Arguments:
            train_loader (DataLoader): Data loader for training set, including
                m X n target FloatTensor and m X l label FloatTensor (m is the
                No. of samples, n is the No. of features, l is the No. of
                classes or tasks)
            valid_loader (DataLoader): Data loader for validation set. The data
                structure is the same as train_loader.
            out (str): the file path for the model file (suffix '.pkg') and log
                file (suffix '.log').
            epochs (int, optional): The maximum number of training epochs
                (default: 100)
            lr (float, optional): learning rate (default: 1e-4)
        """
        if 'optim' in self.__dict__:
            optimizer = self.optim
        else:
            optimizer = optim.Adam(self.parameters(), lr=lr)
        best_loss = np.inf
        last_save = 0
        log = open(out + '.log', 'w')
        for epoch in range(epochs):
            # decay the learning rate as training progresses
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
            for Xb, yb in train_loader:
                optimizer.zero_grad()
                y_ = self.forward(Xb, istrain=True)
                ix = yb == yb  # NaN != NaN, so this masks out missing labels
                yb, y_ = yb[ix], y_[ix]
                loss = self.criterion(y_, yb)
                loss.backward()
                optimizer.step()
            loss_valid = self.evaluate(valid_loader)
            if loss_valid < best_loss:
                # keep the checkpoint with the best validation loss
                torch.save(self.state_dict(), out + '.pkg')
                best_loss = loss_valid
                last_save = epoch
            elif epoch - last_save > 100:
                break  # early stopping: no improvement for 100 epochs
        log.close()
        self.load_state_dict(torch.load(out + '.pkg'))

    def evaluate(self, loader):
        """Evaluating the performance of the DNN model.

        Arguments:
            loader (torch.utils.data.DataLoader): data loader for test set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the
                No. of classes or tasks)

        Return:
            loss (float): the average loss value based on the calculation of
                loss function with given test set.
        """
        loss = 0
        for Xb, yb in loader:
            y_ = self.forward(Xb)
            ix = yb == yb
            yb, y_ = yb[ix], y_[ix]
            loss += self.criterion(y_, yb).item()
        loss = loss / len(loader)
        return loss

    def predict(self, loader):
        """Predicting the probability of each sample in the given dataset.

        Arguments:
            loader (torch.utils.data.DataLoader): data loader for test set,
                only including m X n target FloatTensor (m is the No. of
                samples, n is the No. of features)

        Return:
            score (ndarray): probability of each sample in the given dataset,
                it is an m X l FloatTensor (m is the No. of samples, l is the
                No. of classes or tasks.)
        """
        score = []
        for Xb, _ in loader:
            y_ = self.forward(Xb)
            score.append(y_.detach().cpu())
        score = torch.cat(score, dim=0).numpy()
        return score


class STFullyConnected(Base):
    """Single task DNN classification/regression model. It contains four fully
    connected layers, between which are dropout layers for robustness.

    Arguments:
        n_dim (int): the No. of columns (features) for input tensor
        n_class (int): the No. of columns (classes) for output tensor.
        is_reg (bool, optional): Regression model (True) or Classification model (False)
    """

    def __init__(self, n_dim, n_class, is_reg=False):
        super(STFullyConnected, self).__init__()
        self.dropout = nn.Dropout(0.25)
        self.fc0 = nn.Linear(n_dim, 8000)
        self.fc1 = nn.Linear(8000, 4000)
        self.fc2 = nn.Linear(4000, 2000)
        self.fc3 = nn.Linear(2000, n_class)
        self.is_reg = is_reg
        if is_reg:
            self.criterion = nn.MSELoss()
        elif n_class == 1:
            self.criterion = nn.BCELoss()
            self.activation = nn.Sigmoid()
        else:
            self.criterion = nn.CrossEntropyLoss()
            # explicit dim; matches the deprecated implicit choice for the
            # traced input and the fused softmax kernels above
            self.activation = nn.Softmax(dim=1)

    def forward(self, X, istrain=False):
        """Invoke the class directly as a function.

        Arguments:
            X (FloatTensor): m X n FloatTensor, m is the No. of samples,
                n is the No. of features.
            istrain (bool, optional): is it invoked during the training process
                (True) or just for prediction (False)

        Return:
            y (FloatTensor): m X l FloatTensor, m is the No. of samples,
                l is the No. of classes
        """
        y = F.relu(self.fc0(X))
        if istrain:
            y = self.dropout(y)
        y = F.relu(self.fc1(y))
        if istrain:
            y = self.dropout(y)
        y = F.relu(self.fc2(y))
        if istrain:
            y = self.dropout(y)
        if self.is_reg:
            y = self.fc3(y)
        else:
            y = self.activation(self.fc3(y))
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_dim': 4, 'n_class': 4}]
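# ---------------------------------------------------------------------------
# Editor's addition -- a minimal, hedged usage sketch using the toy sizes from
# get_init_inputs(). A realistic input would be 2-D (batch x features), which
# works because nn.Linear maps over the last dimension.
if __name__ == '__main__':
    model = STFullyConnected(n_dim=4, n_class=4)
    X = torch.rand(8, 4)                  # 8 samples, 4 features
    probs = model(X)                      # class probabilities via softmax
    print(probs.shape, probs.sum(dim=1))  # torch.Size([8, 4]); rows sum to ~1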
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import time import numpy as np from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 8000 x1 = xindex // 8000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 8064 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4000 x1 = xindex // 4000 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2000 x1 = xindex // 2000 tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask) tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 
tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8000, 4), (4, 1)) assert_size_stride(primals_2, (8000,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4000, 8000), (8000, 1)) assert_size_stride(primals_5, (4000,), (1,)) assert_size_stride(primals_6, (2000, 4000), (4000, 1)) assert_size_stride(primals_7, (2000,), (1,)) assert_size_stride(primals_8, (4, 2000), (2000, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0) del buf0 buf11 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1 ), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512000)](buf1, primals_2, buf11, 512000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0) del buf2 buf10 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3, primals_5, buf10, 256000, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(128000)](buf5, primals_7, buf9, 128000, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_4[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0 ), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0 ), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0 ), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11 
class Base(nn.Module):
    """
    This class is the base structure for all classification/regression DNN models.
    Mainly, it provides the general methods for training and evaluating the model
    and predicting on the given data.
    """

    def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
        """Training the DNN model, similar to the scikit-learn or Keras style.
        In the end, the optimal value of parameters will also be persisted on
        the hard drive.

        Arguments:
            train_loader (DataLoader): Data loader for training set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of sample, n is the No. of features,
                l is the No. of classes or tasks)
            valid_loader (DataLoader): Data loader for validation set.
                The data structure is the same as loader_train.
            out (str): the file path for the model file (suffix with '.pkg')
                and log file (suffix with '.log').
            epochs (int, optional): The maximum of training epochs (default: 100)
            lr (float, optional): learning rate (default: 1e-4)
        """
        if 'optim' in self.__dict__:
            optimizer = self.optim
        else:
            optimizer = optim.Adam(self.parameters(), lr=lr)
        best_loss = np.inf
        last_save = 0
        log = open(out + '.log', 'w')
        for epoch in range(epochs):
            time.time()
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
            for i, (Xb, yb) in enumerate(train_loader):
                Xb, yb = Xb, yb
                optimizer.zero_grad()
                y_ = self.forward(Xb, istrain=True)
                ix = yb == yb
                yb, y_ = yb[ix], y_[ix]
                loss = self.criterion(y_, yb)
                loss.backward()
                optimizer.step()
            loss_valid = self.evaluate(valid_loader)
            None
            if loss_valid < best_loss:
                torch.save(self.state_dict(), out + '.pkg')
                None
                best_loss = loss_valid
                last_save = epoch
            else:
                None
                if epoch - last_save > 100:
                    break
        log.close()
        self.load_state_dict(torch.load(out + '.pkg'))

    def evaluate(self, loader):
        """Evaluating the performance of the DNN model.

        Arguments:
            loader (torch.utils.data.DataLoader): data loader for test set,
                including m X n target FloatTensor and l X n label FloatTensor
                (m is the No. of sample, n is the No. of features,
                l is the No. of classes or tasks)

        Return:
            loss (float): the average loss value based on the calculation of
                loss function with given test set.
        """
        loss = 0
        for Xb, yb in loader:
            Xb, yb = Xb, yb
            y_ = self.forward(Xb)
            ix = yb == yb
            yb, y_ = yb[ix], y_[ix]
            loss += self.criterion(y_, yb).data[0]
        loss = loss / len(loader)
        return loss

    def predict(self, loader):
        """Predicting the probability of each sample in the given dataset.

        Arguments:
            loader (torch.utils.data.DataLoader): data loader for test set,
                only including m X n target FloatTensor
                (m is the No. of sample, n is the No. of features)

        Return:
            score (ndarray): probability of each sample in the given dataset,
                it is a m X l FloatTensor (m is the No. of sample,
                l is the No. of classes or tasks.)
        """
        score = []
        for Xb, yb in loader:
            Xb = Xb
            y_ = self.forward(Xb)
            score.append(y_.detach().cpu())
        score = torch.cat(score, dim=0).numpy()
        return score


class STFullyConnectedNew(Base):
    """Single task DNN classification/regression model. It contains four
    fully connected layers, between which are dropout layers for robustness.

    Arguments:
        n_dim (int): the No. of columns (features) for input tensor
        n_class (int): the No. of columns (classes) for output tensor.
        is_reg (bool, optional): Regression model (True) or
            Classification model (False)
    """

    def __init__(self, n_dim, n_class, is_reg=False):
        super(STFullyConnectedNew, self).__init__()
        self.dropout = nn.Dropout(0.25)
        self.fc0 = nn.Linear(n_dim, 8000)
        self.fc1 = nn.Linear(8000, 4000)
        self.fc2 = nn.Linear(4000, 2000)
        self.fc3 = nn.Linear(2000, n_class)
        self.is_reg = is_reg
        if is_reg:
            self.criterion = nn.MSELoss()
        elif n_class == 1:
            self.criterion = nn.BCELoss()
            self.activation = nn.Sigmoid()
        else:
            self.criterion = nn.CrossEntropyLoss()
            self.activation = nn.Softmax()
        self

    def forward(self, input_0):
        primals_1 = self.fc0.weight
        primals_2 = self.fc0.bias
        primals_4 = self.fc1.weight
        primals_5 = self.fc1.bias
        primals_6 = self.fc2.weight
        primals_7 = self.fc2.bias
        primals_8 = self.fc3.weight
        primals_9 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
EXYNOS-999/DrugEx
STFullyConnected
false
5181
[ "MIT" ]
1
f75a90fbc0b9863d594fbff6afecb0f866c076d6
https://github.com/EXYNOS-999/DrugEx/tree/f75a90fbc0b9863d594fbff6afecb0f866c076d6
import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F


class Base(nn.Module):
    """
    This class is the base structure for all classification/regression DNN models.
    Mainly, it provides the general methods for training and evaluating the model
    and predicting on the given data.
    """

    def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
        """Training the DNN model, similar to the scikit-learn or Keras style.
        In the end, the optimal value of parameters will also be persisted on
        the hard drive.

        Arguments:
            train_loader (DataLoader): Data loader for training set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of sample, n is the No. of features,
                l is the No. of classes or tasks)
            valid_loader (DataLoader): Data loader for validation set.
                The data structure is the same as loader_train.
            out (str): the file path for the model file (suffix with '.pkg')
                and log file (suffix with '.log').
            epochs (int, optional): The maximum of training epochs (default: 100)
            lr (float, optional): learning rate (default: 1e-4)
        """
        if 'optim' in self.__dict__:
            optimizer = self.optim
        else:
            optimizer = optim.Adam(self.parameters(), lr=lr)
        best_loss = np.inf
        last_save = 0
        log = open(out + '.log', 'w')
        for epoch in range(epochs):
            time.time()
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
            for i, (Xb, yb) in enumerate(train_loader):
                Xb, yb = Xb, yb
                optimizer.zero_grad()
                y_ = self.forward(Xb, istrain=True)
                ix = yb == yb
                yb, y_ = yb[ix], y_[ix]
                loss = self.criterion(y_, yb)
                loss.backward()
                optimizer.step()
            loss_valid = self.evaluate(valid_loader)
            None
            if loss_valid < best_loss:
                torch.save(self.state_dict(), out + '.pkg')
                None
                best_loss = loss_valid
                last_save = epoch
            else:
                None
                if epoch - last_save > 100:
                    break
        log.close()
        self.load_state_dict(torch.load(out + '.pkg'))

    def evaluate(self, loader):
        """Evaluating the performance of the DNN model.

        Arguments:
            loader (torch.utils.data.DataLoader): data loader for test set,
                including m X n target FloatTensor and l X n label FloatTensor
                (m is the No. of sample, n is the No. of features,
                l is the No. of classes or tasks)

        Return:
            loss (float): the average loss value based on the calculation of
                loss function with given test set.
        """
        loss = 0
        for Xb, yb in loader:
            Xb, yb = Xb, yb
            y_ = self.forward(Xb)
            ix = yb == yb
            yb, y_ = yb[ix], y_[ix]
            loss += self.criterion(y_, yb).data[0]
        loss = loss / len(loader)
        return loss

    def predict(self, loader):
        """Predicting the probability of each sample in the given dataset.

        Arguments:
            loader (torch.utils.data.DataLoader): data loader for test set,
                only including m X n target FloatTensor
                (m is the No. of sample, n is the No. of features)

        Return:
            score (ndarray): probability of each sample in the given dataset,
                it is a m X l FloatTensor (m is the No. of sample,
                l is the No. of classes or tasks.)
        """
        score = []
        for Xb, yb in loader:
            Xb = Xb
            y_ = self.forward(Xb)
            score.append(y_.detach().cpu())
# ... truncated (>4000 chars) for memory efficiency
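A quick way to exercise a record like the one above is to instantiate the compiled wrapper class directly. The following is a minimal smoke-test sketch, not part of the record: it assumes a CUDA device (the call() graph hard-codes device 0) and the fixed (4, 4, 4, 4) input shape that the graph's assert_size_stride guards require; STFullyConnectedNew and call() are the definitions shown above.

import torch

# Hypothetical smoke test; requires CUDA and the exact shapes baked into call().
model = STFullyConnectedNew(n_dim=4, n_class=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    probs = model(x)  # softmax output computed by the fused Triton graph
print(probs.shape)    # torch.Size([4, 4, 4, 4])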
MLP_model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/pr/cprthrqz6iotcmrjfcrj7taqntzxisdcjtr54gsuz2ck2kf6kbsr.py # Topologically Sorted Source Nodes: [mlp_output], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # mlp_output => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (1, 1), (1, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [mlp_output], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 1), (1, 0), 0), primals_4, out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [mlp_output_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 64, grid=grid(64), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 1), (1, 0), 0), primals_6, out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [mlp_output_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf8, 64, grid=grid(64), stream=stream0) del primals_7 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [forecast_y], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 1), (1, 0), 0), primals_8, alpha=1, beta=1, out=buf7) del primals_9 return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), reinterpret_tensor(buf3, (64, 1), (1, 1), 0), reinterpret_tensor(buf5, (64, 1), (1, 1), 0), primals_8, buf8, primals_6, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance 
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MLP_model(nn.Module):

    def __init__(self, inputsize, layer1, layer2, layer3, device):
        super().__init__()
        self.fc1 = nn.Linear(inputsize, layer1)
        self.fc2 = nn.Linear(layer1, layer2)
        self.fc3 = nn.Linear(layer2, layer3)
        self.fc4 = nn.Linear(layer3, 1)
        self.device = device

    def forward(self, our_data):
        """
        our_data: [batch_size,1,4000]:[256,4000]
        output:[256,1]
        """
        mlp_output = nn.functional.relu(self.fc1(our_data))
        mlp_output = nn.functional.relu(self.fc2(mlp_output))
        mlp_output = nn.functional.relu(self.fc3(mlp_output))
        forecast_y = self.fc4(mlp_output)
        return forecast_y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inputsize': 4, 'layer1': 1, 'layer2': 1, 'layer3': 1,
        'device': 0}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 1), (1, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (1, 1), (1, 1))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (1, 1), (1, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf0
        buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
            primals_2, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 1), (1, 0), 0),
            primals_4, out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf2
        buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf3,
            primals_5, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 1), (1, 0), 0),
            primals_6, out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf4
        buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf5,
            primals_7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_7
        buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 1),
            (1, 0), 0), primals_8, alpha=1, beta=1, out=buf7)
        del primals_9
    return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 1), (1, 1), 0),
        reinterpret_tensor(buf3, (64, 1), (1, 1), 0),
        reinterpret_tensor(buf5, (64, 1), (1, 1), 0),
        primals_8, buf8, primals_6, buf9, primals_4, buf10)


class MLP_modelNew(nn.Module):

    def __init__(self, inputsize, layer1, layer2, layer3, device):
        super().__init__()
        self.fc1 = nn.Linear(inputsize, layer1)
        self.fc2 = nn.Linear(layer1, layer2)
        self.fc3 = nn.Linear(layer2, layer3)
        self.fc4 = nn.Linear(layer3, 1)
        self.device = device

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.fc4.weight
        primals_9 = self.fc4.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
GYMS-PKU/HIgh-Frequency-Predictor
MLP_model
false
5182
[ "Apache-2.0" ]
1
aac5efa73d6e15d95d1b99d529dcf639fb8181f4
https://github.com/GYMS-PKU/HIgh-Frequency-Predictor/tree/aac5efa73d6e15d95d1b99d529dcf639fb8181f4
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, inputsize, layer1, layer2, layer3, device):
        super().__init__()
        self.fc1 = nn.Linear(inputsize, layer1)
        self.fc2 = nn.Linear(layer1, layer2)
        self.fc3 = nn.Linear(layer2, layer3)
        self.fc4 = nn.Linear(layer3, 1)
        self.device = device

    def forward(self, our_data):
        """
        our_data: [batch_size,1,4000]:[256,4000]
        output:[256,1]
        """
        mlp_output = nn.functional.relu(self.fc1(our_data))
        mlp_output = nn.functional.relu(self.fc2(mlp_output))
        mlp_output = nn.functional.relu(self.fc3(mlp_output))
        forecast_y = self.fc4(mlp_output)
        return forecast_y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inputsize': 4, 'layer1': 1, 'layer2': 1, 'layer3': 1,
        'device': 0}]
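Because each record keeps both the eager module and its Inductor-lowered twin, output parity can be spot-checked by sharing one set of weights between them. A minimal sketch under stated assumptions (a CUDA device, the record's default init arguments, and its get_inputs() shape); none of this code is part of the original repo.

import torch

eager = MLP_model(inputsize=4, layer1=1, layer2=1, layer3=1, device=0).cuda()
fused = MLP_modelNew(inputsize=4, layer1=1, layer2=1, layer3=1, device=0).cuda()
fused.load_state_dict(eager.state_dict())  # identical weights in both modules

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # Both paths compute fc4(relu(fc3(relu(fc2(relu(fc1(x)))))))
    assert torch.allclose(eager(x), fused(x), atol=1e-5)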
_MLP_B
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out_2 => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del primals_3 buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return (reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class _MLP_B(nn.Module):
    """MLP that only uses age, gender and MMSE"""

    def __init__(self, in_size, drop_rate, fil_num):
        super(_MLP_B, self).__init__()
        self.fc1 = nn.Linear(in_size, fil_num)
        self.fc2 = nn.Linear(fil_num, 2)
        self.do1 = nn.Dropout(drop_rate)
        self.do2 = nn.Dropout(drop_rate)
        self.ac1 = nn.LeakyReLU()

    def forward(self, X):
        out = self.do1(X)
        out = self.fc1(out)
        out = self.ac1(out)
        out = self.do2(out)
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (2, 4), (4, 1))
    assert_size_stride(primals_5, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_3, buf1,
            buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_3
        buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0),
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4)


class _MLP_BNew(nn.Module):
    """MLP that only uses age, gender and MMSE"""

    def __init__(self, in_size, drop_rate, fil_num):
        super(_MLP_BNew, self).__init__()
        self.fc1 = nn.Linear(in_size, fil_num)
        self.fc2 = nn.Linear(fil_num, 2)
        self.do1 = nn.Dropout(drop_rate)
        self.do2 = nn.Dropout(drop_rate)
        self.ac1 = nn.LeakyReLU()

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
GaelKBertrand/Meliora_DeepLearning
_MLP_B
false
5183
[ "MIT" ]
1
5618e01066d4d0afcd7dfe074dda91af22b5857c
https://github.com/GaelKBertrand/Meliora_DeepLearning/tree/5618e01066d4d0afcd7dfe074dda91af22b5857c
import torch
import torch.nn as nn


class Model(nn.Module):
    """MLP that only uses age, gender and MMSE"""

    def __init__(self, in_size, drop_rate, fil_num):
        super().__init__()
        self.fc1 = nn.Linear(in_size, fil_num)
        self.fc2 = nn.Linear(fil_num, 2)
        self.do1 = nn.Dropout(drop_rate)
        self.do2 = nn.Dropout(drop_rate)
        self.ac1 = nn.LeakyReLU()

    def forward(self, X):
        out = self.do1(X)
        out = self.fc1(out)
        out = self.ac1(out)
        out = self.do2(out)
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 0.5, 4]
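One detail worth noting for this record: the lowered call() graph contains no dropout ops, so it corresponds to inference-time behavior, and a parity check against the eager _MLP_B only holds with that model in eval() mode. A sketch under those assumptions (CUDA required; not part of the record):

import torch

eager = _MLP_B(in_size=4, drop_rate=0.5, fil_num=4).cuda().eval()  # disable dropout
fused = _MLP_BNew(in_size=4, drop_rate=0.5, fil_num=4).cuda()
fused.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)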
GaussianActorNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [phi], Original ATen: [aten.tanh] # Source node to ATen node mapping: # phi => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/53/c5336tes3fejn37nhb2iijuur7spy3qcasflywbbqklxwgjxpcvr.py # Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp] # Source node to ATen node mapping: # std => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {}) triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [phi], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), 
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [phi_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp] triton_poi_fused_exp_1.run(primals_8, buf5, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5, reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)


class GaussianActorNet(nn.Module, BasicNet):

    def __init__(self, state_dim, action_dim, action_scale=1.0,
            action_gate=None, gpu=False, unit_std=True, hidden_size=64):
        super(GaussianActorNet, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.action_mean = nn.Linear(hidden_size, action_dim)
        if unit_std:
            self.action_log_std = nn.Parameter(torch.zeros(1, action_dim))
        else:
            self.action_std = nn.Linear(hidden_size, action_dim)
        self.unit_std = unit_std
        self.action_scale = action_scale
        self.action_gate = action_gate
        BasicNet.__init__(self, None, gpu, False)

    def forward(self, x):
        x = self.to_torch_variable(x)
        phi = F.tanh(self.fc1(x))
        phi = F.tanh(self.fc2(phi))
        mean = self.action_mean(phi)
        if self.action_gate is not None:
            mean = self.action_scale * self.action_gate(mean)
        if self.unit_std:
            log_std = self.action_log_std.expand_as(mean)
            std = log_std.exp()
        else:
            std = F.softplus(self.action_std(phi)) + 1e-05
            log_std = std.log()
        return mean, std, log_std

    def predict(self, x):
        return self.forward(x)

    def log_density(self, x, mean, log_std, std):
        var = std.pow(2)
        log_density = -(x - mean).pow(2) / (2 * var + 1e-05) - 0.5 * torch.log(
            2 * Variable(torch.FloatTensor([np.pi])).expand_as(x)) - log_std
        return log_density.sum(1)

    def entropy(self, std):
        return 0.5 * (1 + (2 * std.pow(2) * np.pi + 1e-05).log()).sum(1).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)


@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (64, 4), (4, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (64, 64), (64, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (4, 64), (64, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(4096)](buf1, primals_3, 4096, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
            reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf2
        triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
            (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        buf5, reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0),
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        buf1, buf3, buf5, primals_6, primals_4)


class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)


class GaussianActorNetNew(nn.Module, BasicNet):

    def __init__(self, state_dim, action_dim, action_scale=1.0,
            action_gate=None, gpu=False, unit_std=True, hidden_size=64):
        super(GaussianActorNetNew, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.action_mean = nn.Linear(hidden_size, action_dim)
        if unit_std:
            self.action_log_std = nn.Parameter(torch.zeros(1, action_dim))
        else:
            self.action_std = nn.Linear(hidden_size, action_dim)
        self.unit_std = unit_std
        self.action_scale = action_scale
        self.action_gate = action_gate
        BasicNet.__init__(self, None, gpu, False)

    def predict(self, x):
        return self.forward(x)

    def log_density(self, x, mean, log_std, std):
        var = std.pow(2)
        log_density = -(x - mean).pow(2) / (2 * var + 1e-05) - 0.5 * torch.log(
            2 * Variable(torch.FloatTensor([np.pi])).expand_as(x)) - log_std
        return log_density.sum(1)

    def entropy(self, std):
        return 0.5 * (1 + (2 * std.pow(2) * np.pi + 1e-05).log()).sum(1).mean()

    def forward(self, input_0):
        primals_8 = self.action_log_std
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.action_mean.weight
        primals_7 = self.action_mean.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0], output[1], output[2]
G-Flor/deeprl
GaussianActorNet
false
5184
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim


class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)


class Model(nn.Module, BasicNet):

    def __init__(self, state_dim, action_dim, action_scale=1.0,
            action_gate=None, gpu=False, unit_std=True, hidden_size=64):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.action_mean = nn.Linear(hidden_size, action_dim)
        if unit_std:
            self.action_log_std = nn.Parameter(torch.zeros(1, action_dim))
        else:
            self.action_std = nn.Linear(hidden_size, action_dim)
        self.unit_std = unit_std
        self.action_scale = action_scale
        self.action_gate = action_gate
        BasicNet.__init__(self, None, gpu, False)

    def forward(self, x):
        x = self.to_torch_variable(x)
        phi = F.tanh(self.fc1(x))
        phi = F.tanh(self.fc2(phi))
        mean = self.action_mean(phi)
        if self.action_gate is not None:
            mean = self.action_scale * self.action_gate(mean)
        if self.unit_std:
            log_std = self.action_log_std.expand_as(mean)
            std = log_std.exp()
        else:
            std = F.softplus(self.action_std(phi)) + 1e-05
            log_std = std.log()
        return mean, std, log_std

    def predict(self, x):
        return self.forward(x)

    def log_density(self, x, mean, log_std, std):
        var = std.pow(2)
        log_density = -(x - mean).pow(2) / (2 * var + 1e-05) - 0.5 * torch.log(
            2 * Variable(torch.FloatTensor([np.pi])).expand_as(x)) - log_std
        return log_density.sum(1)

    def entropy(self, std):
        return 0.5 * (1 + (2 * std.pow(2) * np.pi + 1e-05).log()).sum(1).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
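Unlike the single-output records above, this record's call() returns three tensors, mirroring the eager forward's (mean, std, log_std) triple; std is exp(log_std) broadcast to the mean's shape by the triton_poi_fused_exp_1 kernel. A minimal usage sketch, assuming a CUDA device and the record's fixed input shape (illustrative only, not part of the record):

import torch

net = GaussianActorNetNew(state_dim=4, action_dim=4).cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')
mean, std, log_std = net(state)  # three tensors from the fused graph
print(mean.shape, std.shape)     # torch.Size([4, 4, 4, 4]) for both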
_MLP_C
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py # Topologically Sorted Source Nodes: [X], Original ATen: [aten.cat] # Source node to ATen node mapping: # X => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + 
(64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/t6/ct6tzc7qfnkzfnoub66azvnnyh3hy6gkxahfs6n3mwiip73xwian.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out_2 => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (2, 4), (4, 1)) assert_size_stride(primals_6, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [X], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, buf3, 512, grid=grid(512), stream=stream0) del buf1 del primals_4 buf4 = empty_strided_cuda((128, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return (reinterpret_tensor(buf4, (4, 8, 4, 2), (64, 8, 2, 1), 0), reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
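# What triton_poi_fused_cat_0 above computes, expressed in eager PyTorch (a
# minimal reference sketch, not part of the generated module): the kernel
# decomposes each flat index as x2 = idx // 128 (batch), x1 = idx // 16 % 8
# (channel in the concatenated dim; 0-3 read the first input, 4-7 the second)
# and x0 = idx % 16, which is exactly a dim-1 concatenation of two
# (4, 4, 4, 4) inputs into the (4, 8, 4, 4) buffer buf0.
import torch

def cat_reference(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    assert a.shape == b.shape == (4, 4, 4, 4)
    return torch.cat((a, b), dim=1)  # (4, 8, 4, 4), same layout as buf0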
import torch
import torch.nn as nn


class _MLP_C(nn.Module):
    """MLP that uses DPMs from fcn and age, gender and MMSE"""

    def __init__(self, in_size, drop_rate, fil_num):
        super(_MLP_C, self).__init__()
        self.fc1 = nn.Linear(in_size, fil_num)
        self.fc2 = nn.Linear(fil_num, 2)
        self.do1 = nn.Dropout(drop_rate)
        self.do2 = nn.Dropout(drop_rate)
        self.ac1 = nn.LeakyReLU()

    def forward(self, X1, X2):
        X = torch.cat((X1, X2), 1)
        out = self.do1(X)
        out = self.fc1(out)
        out = self.ac1(out)
        out = self.do2(out)
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
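# A minimal usage sketch for the eager module above (illustrative only; eval
# mode makes both Dropout layers the identity, so repeated runs agree):
import torch

model = _MLP_C(in_size=4, drop_rate=0.5, fil_num=4)
model.eval()
x1, x2 = get_inputs()
with torch.no_grad():
    out = model(x1, x2)
print(out.shape)  # torch.Size([4, 8, 4, 2]): cat doubles dim 1, fc2 maps 4 -> 2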
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (2, 4), (4, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_4, buf2, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 buf4 = empty_strided_cuda((128, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return reinterpret_tensor(buf4, (4, 8, 4, 2), (64, 8, 2, 1), 0 ), reinterpret_tensor(buf0, (128, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), primals_5 class _MLP_CNew(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_CNew, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def 
forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
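# A hedged equivalence check between the eager and fused paths (a sketch that
# assumes a CUDA device; the compiled graph contains no dropout, so it matches
# the eager _MLP_C only in eval mode, where Dropout is the identity):
import torch

torch.manual_seed(0)
eager = _MLP_C(4, 0.5, 4).cuda().eval()
fused = _MLP_CNew(4, 0.5, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # same fc1/fc2 weights in both
x1 = torch.rand(4, 4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x1, x2), fused(x1, x2), atol=1e-5)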
GaelKBertrand/Meliora_DeepLearning
_MLP_C
false
5185
[ "MIT" ]
1
5618e01066d4d0afcd7dfe074dda91af22b5857c
https://github.com/GaelKBertrand/Meliora_DeepLearning/tree/5618e01066d4d0afcd7dfe074dda91af22b5857c
import torch
import torch.nn as nn


class Model(nn.Module):
    """MLP that uses DPMs from fcn and age, gender and MMSE"""

    def __init__(self, in_size, drop_rate, fil_num):
        super().__init__()
        self.fc1 = nn.Linear(in_size, fil_num)
        self.fc2 = nn.Linear(fil_num, 2)
        self.do1 = nn.Dropout(drop_rate)
        self.do2 = nn.Dropout(drop_rate)
        self.ac1 = nn.LeakyReLU()

    def forward(self, X1, X2):
        X = torch.cat((X1, X2), 1)
        out = self.do1(X)
        out = self.fc1(out)
        out = self.ac1(out)
        out = self.do2(out)
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 0.5, 4]
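# Note that this variant's get_init_inputs returns positional arguments
# ([4, 0.5, 4]) rather than the ([], kwargs) pair used in the python_code
# field above; a minimal construction sketch:
model = Model(*get_init_inputs())  # Model(4, 0.5, 4)
out = model(*get_inputs())         # same (4, 8, 4, 2) output as _MLP_C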
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/oz/cozo2tvc7hyhhuvn7mvono4mqt4xjxbetoafx6siwgnsijj54xyl.py # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%view_12, [1, 4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_1 = async_compile.triton('triton_poi_fused_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bx/cbxsgautsk4bd7exwaatk4zmdcujosx2bs6glhzldma6whelf3xa.py # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights => exp, sum_1 # masked_fill_ => full_default, where # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default, %bmm), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : 
[num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 
+ tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/k3/ck3ynjvzkegtm2kjok34x3fffzfegirtmypcfcgbywxdahuilxmg.py # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_weights => div_1, exp # masked_fill_ => full_default, where # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default, %bmm), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_masked_fill_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex x4 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = 
tl.load(in_out_ptr0 + (x3), xmask) tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/s7/cs7p2dyxlesdvuyx4owztmqg5sapsarlgzaivin7okeoe6lxygw7.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_18, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/y6/cy6mkjdwes62jaih4dzebyknvxezhquh37cme5cflrxbxff3z675.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_18, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u4/cu4mvhweewrefdurxuza5qfbqlwomkc67kmxkkaurh6luaf2e2fz.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_21,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/he/chevf4d6tadiz3y2a2abr2lj2bvo3wyfykoivwj2s4xedp3vdjuf.py # Topologically Sorted Source Nodes: [tensor_8], Original ATen: [aten.add] # Source node to ATen node mapping: # tensor_8 => add_3 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %view_23), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hn/chnyp4bqchi6cc3qkpikodtjzt7sfs4gz3r2kunqaesb7ahrywso.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_24, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = 
tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2i/c2it3bfvz5dki6xljhyv3o2tjme4rnp2cbavnrl4nu6kpvzqdzbp.py # Topologically Sorted Source Nodes: [tensor_10], Original ATen: [aten.mul] # Source node to ATen node mapping: # tensor_10 => mul_4 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_25, %unsqueeze), kwargs = {}) triton_poi_fused_mul_10 = async_compile.triton('triton_poi_fused_mul_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), 
(4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_4, buf1, 16, 4, grid=grid(16, 4), stream=stream0) del primals_4 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf3, primals_8, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_6, buf5, 16, 4, grid=grid(16, 4), stream=stream0) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_prod], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] triton_poi_fused_repeat_1.run(primals_2, buf7, 64, grid=grid(64), stream=stream0) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0); del buf2 # reuse buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_2.run(buf7, buf6, buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_3.run(buf10, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [attentioned], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del 
buf8 # reuse # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_5.run(primals_1, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_6.run(primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0); del buf17 # reuse buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_7.run(buf18, primals_14, buf24, 64, grid=grid(64), stream=stream0) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [tensor_8], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf20, buf16, primals_16, 64, grid=grid(64), stream=stream0) del primals_16 buf21 = buf15; del buf15 # reuse buf22 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tensor_10], Original ATen: [aten.mul] triton_poi_fused_mul_10.run(buf20, buf21, buf22, primals_17, primals_18, primals_2, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_2, primals_11, primals_17, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
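# Eager-mode reference for the fused masked-fill + softmax pair above
# (triton_poi_fused__softmax_masked_fill_2 computes each row's max and sum of
# exponentials; _3 then normalizes in place). A sketch mirroring the source
# module below; the fill constant is float32(-1e20), and the softmax scale is
# 1.0 for this configuration because dim_per_head == 4 // 4 == 1:
import torch
import torch.nn.functional as F

def masked_softmax_reference(dot_prod, attn_mask):
    # attn_mask is True where attention is blocked (mask == 0 in the source)
    dot_prod = dot_prod.masked_fill(attn_mask, -1e20)
    return F.softmax(dot_prod, dim=-1)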
import math
import torch
import torch.nn.functional as F
from torch import nn


def _normalize(tensor, norm_layer):
    """ Broadcast layer norm """
    size = tensor.size()
    return norm_layer(tensor.view(-1, size[-1])).view(size)


class MultiHeadAttention(nn.Module):

    def __init__(self, n_heads, dim, dropout=0):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.dim = dim
        self.attn_dropout = nn.Dropout(p=dropout)
        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.q_lin.weight)
        nn.init.xavier_normal_(self.k_lin.weight)
        nn.init.xavier_normal_(self.v_lin.weight)
        self.out_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.out_lin.weight)

    def forward(self, query, key=None, value=None, mask=None):
        batch_size, query_len, dim = query.size()
        assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured'
        assert mask is not None, 'Mask is None, please specify a mask'
        n_heads = self.n_heads
        dim_per_head = dim // n_heads
        scale = math.sqrt(dim_per_head)

        def prepare_head(tensor):
            _bsz, seq_len, _ = tensor.size()
            tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head)
            tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head)
            return tensor

        if key is None and value is None:
            key = value = query
        elif value is None:
            value = key
        _, key_len, dim = key.size()
        q = prepare_head(self.q_lin(query))
        k = prepare_head(self.k_lin(key))
        v = prepare_head(self.v_lin(value))
        dot_prod = q.bmm(k.transpose(1, 2))
        attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len).view(batch_size * n_heads, query_len, key_len)
        assert attn_mask.shape == dot_prod.shape
        dot_prod.masked_fill_(attn_mask, -float(1e+20))
        attn_weights = F.softmax(dot_prod / scale, dim=-1)
        attn_weights = self.attn_dropout(attn_weights)
        attentioned = attn_weights.bmm(v)
        attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim)
        out = self.out_lin(attentioned)
        return out


class TransformerFFN(nn.Module):

    def __init__(self, dim, dim_hidden, relu_dropout=0):
        super(TransformerFFN, self).__init__()
        self.relu_dropout = nn.Dropout(p=relu_dropout)
        self.lin1 = nn.Linear(dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, dim)
        nn.init.xavier_uniform_(self.lin1.weight)
        nn.init.xavier_uniform_(self.lin2.weight)

    def forward(self, x):
        x = F.relu(self.lin1(x))
        x = self.relu_dropout(x)
        x = self.lin2(x)
        return x


class TransformerEncoderLayer(nn.Module):

    def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout=0.0, relu_dropout=0.0, dropout=0.0):
        super().__init__()
        self.dim = embedding_size
        self.ffn_dim = ffn_size
        self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout)
        self.norm1 = nn.LayerNorm(embedding_size)
        self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout)
        self.norm2 = nn.LayerNorm(embedding_size)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, tensor, mask):
        tensor = tensor + self.dropout(self.attention(tensor, mask=mask))
        tensor = _normalize(tensor, self.norm1)
        tensor = tensor + self.dropout(self.ffn(tensor))
        tensor = _normalize(tensor, self.norm2)
        tensor *= mask.unsqueeze(-1).float()
        return tensor


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
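# Minimal usage sketch for the eager encoder layer above (illustrative; an
# all-ones mask keeps every position visible, so the final masking multiply
# is a no-op; the forward asserts that a mask is always provided):
import torch

layer = TransformerEncoderLayer(n_heads=4, embedding_size=4, ffn_size=4)
tensor, mask = torch.rand(4, 4, 4), torch.ones(4, 4)
out = layer(tensor, mask)
print(out.shape)  # torch.Size([4, 4, 4]); rows with mask == 0 would be zeroed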
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) 
assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_4, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_8, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_6, buf5, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(64)](primals_2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](primals_1, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 buf24 = empty_strided_cuda((4, 4, 
4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18, primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, primals_2, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_2, primals_11, primals_17, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor( buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() 
self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class TransformerEncoderLayerNew(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout=0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0, input_1): primals_1 = input_0 primals_2 = input_1 primals_3 = self.attention.q_lin.weight primals_4 = self.attention.q_lin.bias primals_5 = self.attention.k_lin.weight primals_6 = self.attention.k_lin.bias primals_7 = self.attention.v_lin.weight primals_8 = self.attention.v_lin.bias primals_9 = self.attention.out_lin.weight primals_10 = self.attention.out_lin.bias primals_11 = self.norm1.weight primals_12 = self.norm1.bias primals_13 = self.ffn.lin1.weight primals_14 = self.ffn.lin1.bias primals_15 = self.ffn.lin2.weight primals_16 = self.ffn.lin2.bias primals_17 = self.norm2.weight primals_18 = self.norm2.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
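A parity check between the eager layer and the generated wrapper is the natural way to validate the primals mapping above (note the mapping is corrected here so that primals_2 is the mask consumed by triton_poi_fused_repeat_1 and triton_poi_fused_mul_10, and primals_15 is the lin2 weight used in the second FFN matmul). The sketch below is not part of the original record and assumes a CUDA device, since the Triton kernels are compiled for 'cuda':

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = TransformerEncoderLayer(4, 4, 4).cuda().eval()
    fused = TransformerEncoderLayerNew(4, 4, 4).cuda().eval()
    fused.load_state_dict(eager.state_dict())  # share identical parameters
    x = torch.rand(4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, device='cuda')
    with torch.no_grad():
        ref, out = eager(x, mask), fused(x, mask)
    assert torch.allclose(ref, out, atol=1e-5)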
FrankVerhoef/Persona-Dialogue-Generation
TransformerEncoderLayer
false
5186
[ "MIT" ]
1
ffd8413c2e8b6446097902dd1c496aeb24b852b4
https://github.com/FrankVerhoef/Persona-Dialogue-Generation/tree/ffd8413c2e8b6446097902dd1c496aeb24b852b4
import math import torch import torch.nn.functional as F from torch import nn def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super().__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super().__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class Model(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.dropout = nn.Dropout(p=dropout) def forward(self, tensor, mask): tensor = tensor + self.dropout(self.attention(tensor, mask=mask)) tensor = _normalize(tensor, self.norm1) tensor = tensor + self.dropout(self.ffn(tensor)) tensor = _normalize(tensor, self.norm2) tensor *= mask.unsqueeze(-1).float() # ... truncated (>4000 chars) for memory efficiency
ResidualDenseBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5i/c5icgszsmzzz6scnewpijmeu66hhcnz77ph7igiddh765aw6r7tr.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 96 x0 = xindex % 4096 x2 = (xindex // 393216) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = 
tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (131072*x2)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + ((-64) + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp6, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp5, tmp18) tl.store(out_ptr0 + (x3), tmp19, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3w/c3wk4rwnrsnr4gmyth3hio7xwmgrpmqs4uenaletytzd3iyq6vvp.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 128 x0 = xindex % 4096 x2 = (xindex // 524288) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (131072*x2)), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + ((-64) + x1), tmp9, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 
= tmp0 >= tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tl.load(in_ptr3 + (x0 + (4096*((-96) + x1)) + (131072*x2)), tmp20, other=0.0) tmp24 = tl.load(in_ptr4 + ((-96) + x1), tmp20, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tmp25 > tmp13 tmp27 = tmp25 * tmp15 tmp28 = tl.where(tmp26, tmp25, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp20, tmp28, tmp29) tmp31 = tl.where(tmp9, tmp19, tmp30) tmp32 = tl.where(tmp4, tmp5, tmp31) tl.store(out_ptr0 + (x3), tmp32, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2r/c2rqqtyzbql3t7c5cwopzfw7emthunmmfinwngoit7fltetzafgz.py # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1, %where_2], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2621440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 160 x0 = xindex % 4096 x2 = (xindex // 655360) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (131072*x2)), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + ((-64) + x1), tmp9, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= 
tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr3 + (x0 + (4096*((-96) + x1)) + (131072*x2)), tmp23, other=0.0) tmp25 = tl.load(in_ptr4 + ((-96) + x1), tmp23, eviction_policy='evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp13 tmp28 = tmp26 * tmp15 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp0 >= tmp21 tmp33 = tl.full([1], 160, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tl.load(in_ptr5 + (x0 + (4096*((-128) + x1)) + (131072*x2)), tmp32, other=0.0) tmp36 = tl.load(in_ptr6 + ((-128) + x1), tmp32, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tmp37 > tmp13 tmp39 = tmp37 * tmp15 tmp40 = tl.where(tmp38, tmp37, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp32, tmp40, tmp41) tmp43 = tl.where(tmp23, tmp31, tmp42) tmp44 = tl.where(tmp9, tmp19, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + (x3), tmp45, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/j2/cj26bsblk3vsepnyamagvkjh7epw4anmcc7il5aezu6wbv4vtlgr.py # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1, %where_2, %where_3], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3145728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 192 x0 = xindex % 4096 x2 = (xindex // 786432) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (131072*x2)), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + ((-64) + x1), tmp9, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr3 + (x0 + (4096*((-96) + x1)) + (131072*x2)), tmp23, other=0.0) tmp25 = tl.load(in_ptr4 + ((-96) + x1), tmp23, eviction_policy='evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp13 tmp28 = tmp26 * tmp15 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp0 >= tmp21 tmp33 = tl.full([1], 160, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tl.load(in_ptr5 + (x0 + (4096*((-128) + x1)) + (131072*x2)), tmp35, other=0.0) tmp37 = tl.load(in_ptr6 + ((-128) + x1), tmp35, eviction_policy='evict_last', other=0.0) tmp38 = tmp36 + tmp37 tmp39 = tmp38 > tmp13 tmp40 = tmp38 * tmp15 tmp41 = tl.where(tmp39, tmp38, tmp40) tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp35, tmp41, tmp42) tmp44 = tmp0 >= tmp33 tmp45 = tl.full([1], 192, tl.int64) tmp46 = tmp0 < tmp45 tmp47 = tl.load(in_ptr7 + (x0 + (4096*((-160) + x1)) + (131072*x2)), tmp44, other=0.0) tmp48 = tl.load(in_ptr8 + ((-160) + x1), tmp44, eviction_policy='evict_last', other=0.0) tmp49 = tmp47 + tmp48 tmp50 = tmp49 > tmp13 tmp51 = tmp49 * tmp15 tmp52 = tl.where(tmp50, tmp49, tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp44, tmp52, tmp53) tmp55 = tl.where(tmp35, tmp43, tmp54) tmp56 = tl.where(tmp23, tmp31, tmp55) tmp57 = tl.where(tmp9, tmp19, tmp56) tmp58 = tl.where(tmp4, tmp5, tmp57) tl.store(out_ptr0 + (x3), tmp58, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ao/caomncnid3kpxewv2biaux4wn6aq7chzedvzfrjvgb4nxjfcd3i4.py # Topologically Sorted Source Nodes: [x5, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul_4 # x5 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_4), kwargs = {}) triton_poi_fused_add_convolution_mul_4 = async_compile.triton('triton_poi_fused_add_convolution_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_out_ptr0 + (x3), None) tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.2 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/so/csob5f5gihtgxmcfdkbvlkmaytf6h7aanlunprqwxuq3peoe4znw.py # Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x4 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_4, (32, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (32, 160, 3, 3), (1440, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (64, 192, 3, 3), (1728, 9, 3, 1)) assert_size_stride(primals_11, (64, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_3, buf0, primals_2, buf1, 1572864, grid=grid(1572864), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_3, buf0, primals_2, buf2, primals_5, buf3, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = empty_strided_cuda((4, 160, 64, 64), (655360, 4096, 64, 1), torch.float32) # Topologically 
Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf5, 2621440, grid=grid(2621440), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 192, 64, 64), (786432, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 3145728, grid=grid(3145728), stream=stream0) # Topologically Sorted Source Nodes: [x5], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x5, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add] triton_poi_fused_add_convolution_mul_4.run(buf9, primals_3, primals_11, 1048576, grid=grid(1048576), stream=stream0) del primals_11 buf10 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf6, primals_9, buf10, 524288, grid=grid(524288), stream=stream0) del buf6 del primals_9 buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf4, primals_7, buf11, 524288, grid=grid(524288), stream=stream0) del buf4 del primals_7 buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf2, primals_5, buf12, 524288, grid=grid(524288), stream=stream0) del buf2 del primals_5 buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf0, primals_2, buf13, 524288, grid=grid(524288), stream=stream0) del buf0 del primals_2 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 160, 3, 3), (1440, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
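For readers mapping the kernels above back to the eager graph: each triton_poi_fused_cat_N fuses the conv bias add, the LeakyReLU (the 0.2 multiplier on the negative branch), and the channel concatenation into a single pass over the output. A plain-PyTorch reference for the first of them, offered as a sketch rather than part of the record:

import torch
import torch.nn.functional as F

def fused_cat_reference(x, conv_out, bias):
    # Equivalent of triton_poi_fused_cat_0: concatenate the block input with
    # leaky_relu(conv_out + bias); negative_slope=0.2 matches the kernel's
    # 0.2 factor on values that fail the > 0 test.
    act = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.2)
    return torch.cat([x, act], dim=1)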
import torch import torch.nn as nn class ResidualDenseBlock(nn.Module): def __init__(self, channels=64, kernel_size=3, growth=32): super().__init__() self.conv2d_1 = self.conv2d(channels, growth, kernel_size, growth, 0) self.conv2d_2 = self.conv2d(channels, growth, kernel_size, growth, 1) self.conv2d_3 = self.conv2d(channels, growth, kernel_size, growth, 2) self.conv2d_4 = self.conv2d(channels, growth, kernel_size, growth, 3) self.conv2d_5 = self.conv2d(channels, channels, kernel_size, growth, 4) self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) @staticmethod def conv2d(in_channels, out_channels, kernel_size, growth, factor): return nn.Conv2d(in_channels=in_channels + factor * growth, out_channels=out_channels, kernel_size=kernel_size, padding= kernel_size // 2) def forward(self, input_data): x1 = self.relu(self.conv2d_1(input_data)) x2 = self.relu(self.conv2d_2(torch.cat((input_data, x1), 1))) x3 = self.relu(self.conv2d_3(torch.cat((input_data, x1, x2), 1))) x4 = self.relu(self.conv2d_4(torch.cat((input_data, x1, x2, x3), 1))) x5 = self.conv2d_5(torch.cat((input_data, x1, x2, x3, x4), 1)) return input_data + x5 * 0.2 def get_inputs(): return [torch.rand([4, 64, 64, 64])] def get_init_inputs(): return [[], {}]
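The conv2d staticmethod above sizes each convolution's input as channels + factor * growth, so with the defaults (channels=64, growth=32) the five convolutions consume 64, 96, 128, 160 and 192 input channels, matching the weight shapes asserted in the generated call() below. A one-line check of that arithmetic:

channels, growth = 64, 32
assert [channels + f * growth for f in range(5)] == [64, 96, 128, 160, 192]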
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 96 x0 = xindex % 4096 x2 = xindex // 393216 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 96, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-64 + x1), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp6, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp5, tmp18) tl.store(out_ptr0 + x3, tmp19, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 128 x0 = xindex % 4096 x2 = xindex // 524288 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tl.full([1], 128, tl.int64) tmp23 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp20, other=0.0) tmp24 = tl.load(in_ptr4 + (-96 + x1), tmp20, eviction_policy= 'evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tmp25 > tmp13 tmp27 = tmp25 * tmp15 tmp28 = tl.where(tmp26, tmp25, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp20, tmp28, tmp29) tmp31 = tl.where(tmp9, tmp19, tmp30) tmp32 = tl.where(tmp4, tmp5, tmp31) tl.store(out_ptr0 + x3, tmp32, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 160 x0 = xindex % 4096 x2 = xindex // 655360 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy= 
'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp23, other=0.0) tmp25 = tl.load(in_ptr4 + (-96 + x1), tmp23, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp13 tmp28 = tmp26 * tmp15 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp0 >= tmp21 tl.full([1], 160, tl.int64) tmp35 = tl.load(in_ptr5 + (x0 + 4096 * (-128 + x1) + 131072 * x2), tmp32, other=0.0) tmp36 = tl.load(in_ptr6 + (-128 + x1), tmp32, eviction_policy= 'evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tmp37 > tmp13 tmp39 = tmp37 * tmp15 tmp40 = tl.where(tmp38, tmp37, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp32, tmp40, tmp41) tmp43 = tl.where(tmp23, tmp31, tmp42) tmp44 = tl.where(tmp9, tmp19, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + x3, tmp45, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 192 x0 = xindex % 4096 x2 = xindex // 786432 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 96, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9, other=0.0) tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.0 tmp14 = tmp12 > tmp13 tmp15 = 0.2 tmp16 = tmp12 * tmp15 tmp17 = tl.where(tmp14, tmp12, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 128, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp23, other=0.0) tmp25 = tl.load(in_ptr4 + (-96 + x1), tmp23, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 > tmp13 tmp28 = tmp26 * tmp15 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp23, tmp29, tmp30) tmp32 = tmp0 >= tmp21 tmp33 = tl.full([1], 160, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tl.load(in_ptr5 + (x0 + 4096 * (-128 + x1) + 131072 * x2), tmp35, other=0.0) tmp37 = tl.load(in_ptr6 + (-128 + x1), tmp35, eviction_policy= 'evict_last', other=0.0) tmp38 = tmp36 + tmp37 tmp39 = tmp38 > tmp13 tmp40 = tmp38 * tmp15 tmp41 = tl.where(tmp39, tmp38, tmp40) tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp35, tmp41, tmp42) tmp44 = tmp0 >= tmp33 tl.full([1], 192, tl.int64) tmp47 = tl.load(in_ptr7 + (x0 + 4096 * (-160 + x1) + 131072 * x2), tmp44, other=0.0) tmp48 = tl.load(in_ptr8 + (-160 + x1), tmp44, eviction_policy= 'evict_last', other=0.0) tmp49 = tmp47 + tmp48 tmp50 = tmp49 > tmp13 tmp51 = tmp49 * tmp15 tmp52 = tl.where(tmp50, tmp49, tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = 
tl.where(tmp44, tmp52, tmp53) tmp55 = tl.where(tmp35, tmp43, tmp54) tmp56 = tl.where(tmp23, tmp31, tmp55) tmp57 = tl.where(tmp9, tmp19, tmp56) tmp58 = tl.where(tmp4, tmp5, tmp57) tl.store(out_ptr0 + x3, tmp58, None) @triton.jit def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.2 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_4, (32, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 160, 3, 3), (1440, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (64, 192, 3, 3), (1728, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1572864)](primals_3, buf0, primals_2, buf1, 1572864, XBLOCK=1024, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(2097152)](primals_3, buf0, primals_2, buf2, primals_5, buf3, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = empty_strided_cuda((4, 160, 64, 64), (655360, 4096, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(2621440)](primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf5, 2621440, XBLOCK=1024, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 192, 64, 64), (786432, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(3145728)](primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 3145728, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_add_convolution_mul_4[grid(1048576)](buf9, primals_3, primals_11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf6, primals_9, buf10, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf6 del primals_9 buf11 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf4, primals_7, buf11, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf4 del primals_7 buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf2, primals_5, buf12, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf2 del primals_5 buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid( 524288)](buf0, primals_2, buf13, 524288, XBLOCK=1024, num_warps =4, num_stages=1) del buf0 del primals_2 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13) class ResidualDenseBlockNew(nn.Module): def __init__(self, channels=64, kernel_size=3, growth=32): super().__init__() self.conv2d_1 = self.conv2d(channels, growth, kernel_size, growth, 0) self.conv2d_2 = self.conv2d(channels, growth, kernel_size, growth, 1) self.conv2d_3 = self.conv2d(channels, growth, kernel_size, growth, 2) self.conv2d_4 = self.conv2d(channels, growth, kernel_size, growth, 3) self.conv2d_5 = self.conv2d(channels, channels, kernel_size, growth, 4) self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) @staticmethod def conv2d(in_channels, out_channels, kernel_size, growth, factor): return nn.Conv2d(in_channels=in_channels + factor * growth, out_channels=out_channels, kernel_size=kernel_size, padding= kernel_size // 2) def forward(self, input_0): primals_1 = self.conv2d_1.weight primals_2 = self.conv2d_1.bias primals_4 = self.conv2d_2.weight primals_5 = self.conv2d_2.bias primals_6 = self.conv2d_3.weight primals_7 = self.conv2d_3.bias primals_8 = self.conv2d_4.weight primals_9 = self.conv2d_4.bias primals_10 = self.conv2d_5.weight primals_11 = self.conv2d_5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Frognar/Super-Resolution
ResidualDenseBlock
false
5187
[ "MIT" ]
1
406b909d71e156aa11ee589698744e3ad9abfee7
https://github.com/Frognar/Super-Resolution/tree/406b909d71e156aa11ee589698744e3ad9abfee7
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, channels=64, kernel_size=3, growth=32): super().__init__() self.conv2d_1 = self.conv2d(channels, growth, kernel_size, growth, 0) self.conv2d_2 = self.conv2d(channels, growth, kernel_size, growth, 1) self.conv2d_3 = self.conv2d(channels, growth, kernel_size, growth, 2) self.conv2d_4 = self.conv2d(channels, growth, kernel_size, growth, 3) self.conv2d_5 = self.conv2d(channels, channels, kernel_size, growth, 4) self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) @staticmethod def conv2d(in_channels, out_channels, kernel_size, growth, factor): return nn.Conv2d(in_channels=in_channels + factor * growth, out_channels=out_channels, kernel_size=kernel_size, padding= kernel_size // 2) def forward(self, input_data): x1 = self.relu(self.conv2d_1(input_data)) x2 = self.relu(self.conv2d_2(torch.cat((input_data, x1), 1))) x3 = self.relu(self.conv2d_3(torch.cat((input_data, x1, x2), 1))) x4 = self.relu(self.conv2d_4(torch.cat((input_data, x1, x2, x3), 1))) x5 = self.conv2d_5(torch.cat((input_data, x1, x2, x3, x4), 1)) return input_data + x5 * 0.2 def get_inputs(): return [torch.rand([4, 64, 64, 64])] def get_init_inputs(): return []
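The four fused `cat` kernels in this record each materialize one stage of the dense block's growing feature map (64 -> 96 -> 128 -> 160 -> 192 channels), folding the previous convolution's bias add and LeakyReLU into the copy instead of running them as separate ops. Below is a minimal eager-mode sketch of that channel-growth pattern, runnable on CPU; the helper name `dense_cat_stages` is ours for illustration, not part of the record.

import torch
import torch.nn as nn

def dense_cat_stages(x, convs, slope=0.2):
    # Each stage concatenates everything seen so far, convolves to 32 new
    # channels, and appends the LeakyReLU of the result -- the work that
    # triton_poi_fused_cat_{0..3} above fuse into single passes.
    feats = [x]
    for conv in convs:
        out = nn.functional.leaky_relu(conv(torch.cat(feats, dim=1)), slope)
        feats.append(out)
    return torch.cat(feats, dim=1)  # 192 channels: input to the final conv

convs = [nn.Conv2d(64 + i * 32, 32, kernel_size=3, padding=1) for i in range(4)]
x = torch.rand(1, 64, 8, 8)
print(dense_cat_stages(x, convs).shape)  # torch.Size([1, 192, 8, 8])

The compiled `call` then applies the final 192 -> 64 convolution and computes the residual `input + x5 * 0.2` in `triton_poi_fused_add_convolution_mul_4`.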
SentenceEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vu/cvuv73ijvl45rys2kvk2dud7shg4nznn622fzyldpf7pmxppx3o5.py # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.tanh] # Source node to ATen node mapping: # hidden => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [atten_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # atten_weights => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: [atten_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # atten_weights => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [atten_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [atten_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [sentence_embedding], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), primals_3, out=buf5) del buf4 return (buf5, primals_3, buf1, buf2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SentenceEmbedding(BaseSelfAttention): def __init__(self, embedding_dim, hidden_dim, num_annotations): super(SentenceEmbedding, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.num_annotations = num_annotations self.initialize_layers() def initialize_layers(self): self.Ws1 = nn.Linear(self.embedding_dim, self.hidden_dim) self.Ws2 = nn.Linear(self.hidden_dim, self.num_annotations) self.init_linear(self.Ws1) self.init_linear(self.Ws2) def forward(self, word_embeddings): """ Args: word_embeddings: (batch_size, doc_maxlen, embedding_dim) Output: sentence_embedding: (batch_size, num_annotations, embedding_dim) """ hidden = F.tanh(self.Ws1(word_embeddings)) atten_weights = F.softmax(self.Ws2(hidden), dim=2) atten_weights = atten_weights.transpose(1, 2) sentence_embedding = atten_weights.bmm(word_embeddings) return sentence_embedding def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embedding_dim': 4, 'hidden_dim': 4, 'num_annotations': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), primals_3, out=buf5) del buf4 return buf5, primals_3, buf1, buf2, primals_4 class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SentenceEmbeddingNew(BaseSelfAttention): def __init__(self, embedding_dim, hidden_dim, num_annotations): super(SentenceEmbeddingNew, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.num_annotations = num_annotations self.initialize_layers() def initialize_layers(self): self.Ws1 = nn.Linear(self.embedding_dim, self.hidden_dim) self.Ws2 = nn.Linear(self.hidden_dim, self.num_annotations) self.init_linear(self.Ws1) self.init_linear(self.Ws2) def forward(self, input_0): primals_1 = self.Ws1.weight primals_2 = self.Ws1.bias primals_4 = self.Ws2.weight primals_5 = self.Ws2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Gan-Tu/ganutils
SentenceEmbedding
false
5188
[ "MIT" ]
1
203c703cbba0345f9cfe23b03e1e3981f03e43db
https://github.com/Gan-Tu/ganutils/tree/203c703cbba0345f9cfe23b03e1e3981f03e43db
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseSelfAttention(nn.Module): def __init__(self): super().__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class Model(BaseSelfAttention): def __init__(self, embedding_dim, hidden_dim, num_annotations): super().__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.num_annotations = num_annotations self.initialize_layers() def initialize_layers(self): self.Ws1 = nn.Linear(self.embedding_dim, self.hidden_dim) self.Ws2 = nn.Linear(self.hidden_dim, self.num_annotations) self.init_linear(self.Ws1) self.init_linear(self.Ws2) def forward(self, word_embeddings): """ Args: word_embeddings: (batch_size, doc_maxlen, embedding_dim) Output: sentence_embedding: (batch_size, num_annotations, embedding_dim) """ hidden = F.tanh(self.Ws1(word_embeddings)) atten_weights = F.softmax(self.Ws2(hidden), dim=2) atten_weights = atten_weights.transpose(1, 2) sentence_embedding = atten_weights.bmm(word_embeddings) return sentence_embedding def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
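The `triton_poi_fused__softmax_1` / `_2` pair in this record is the standard numerically stable two-pass softmax: pass one subtracts the row max before exponentiating, pass two divides by the row sum. A minimal sketch of the same split in plain PyTorch; `two_pass_softmax` is an illustrative name, not from the record.

import torch

def two_pass_softmax(x, dim=-1):
    # Pass 1: shift by the row max so exp() cannot overflow
    # (what triton_poi_fused__softmax_1 computes and stores).
    numer = (x - x.amax(dim=dim, keepdim=True)).exp()
    # Pass 2: normalize by the sum of exponentials
    # (what triton_poi_fused__softmax_2 computes).
    return numer / numer.sum(dim=dim, keepdim=True)

x = torch.randn(4, 4, 4)
assert torch.allclose(two_pass_softmax(x, dim=2), torch.softmax(x, dim=2), atol=1e-6)

Note that the surrounding `mm`, `addmm`, and `bmm` calls stay as cuBLAS extern kernels; only the elementwise tanh and softmax pieces are lowered to Triton.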
GFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (128, 32), (32, 1)) assert_size_stride(primals_7, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 2048, grid=grid(2048), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 128), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn from torch import optim class GFunction(nn.Module): def __init__(self, obs_size, num_outputs=128): super().__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.last(x) return x def train_model(self, c_out, next_state): loss = nn.MSELoss()(c_out, self.forward(next_state)) loss.backward() self.optimizer.step() return loss.item() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'obs_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (128, 32), (32, 1)) assert_size_stride(primals_7, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3, primals_5, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 128), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6 class GFunctionNew(nn.Module): def __init__(self, obs_size, num_outputs=128): super().__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def train_model(self, c_out, next_state): loss = nn.MSELoss()(c_out, self.forward(next_state)) loss.backward() self.optimizer.step() return loss.item() def 
forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.last.weight primals_7 = self.last.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Deepest-Project/agent57_from_ngu
GFunction
false
5189
[ "MIT" ]
1
2f596024c7538cfaa5cf63cde1b77f8a1c22d208
https://github.com/Deepest-Project/agent57_from_ngu/tree/2f596024c7538cfaa5cf63cde1b77f8a1c22d208
import torch import torch.nn.functional as F from torch import nn from torch import optim class Model(nn.Module): def __init__(self, obs_size, num_outputs=128): super().__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.last(x) return x def train_model(self, c_out, next_state): loss = nn.MSELoss()(c_out, self.forward(next_state)) loss.backward() self.optimizer.step() return loss.item() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
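`triton_poi_fused_relu_threshold_backward_0` in this record fuses each `Linear` layer's bias add and ReLU and, in the same pass, stores the boolean `<= 0` mask that autograd's `threshold_backward` will use to zero gradients. A small sketch of the forward-plus-mask computation; `relu_with_backward_mask` is our name for illustration.

import torch

def relu_with_backward_mask(x, bias):
    pre = x + bias         # fused bias add
    out = torch.relu(pre)  # forward activation
    mask = out <= 0        # saved for threshold_backward: gradient is zeroed here
    return out, mask

x = torch.randn(64, 32)
bias = torch.randn(32)
out, mask = relu_with_backward_mask(x, bias)
assert torch.equal(mask, (x + bias) <= 0)

Saving the mask rather than the pre-activation keeps the backward-pass memory for each ReLU at one bool per element.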
UpSample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/uy/cuykwspdedcweei632sohoszydcdycwgfpbqfmts2rz3pbme75y3.py # Topologically Sorted Source Nodes: [up_x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add] # Source node to ATen node mapping: # up_x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1.0), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {}) # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 
192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x6 = xindex x4 = (xindex // 48) x7 = xindex % 48 tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = triton_helpers.minimum(tmp23, tmp2) tmp25 = tmp20 * tmp24 tmp26 = tmp16 + tmp25 tmp27 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last') tmp29 = tmp28 - tmp27 tmp30 = tmp29 * tmp24 tmp31 = tmp27 + tmp30 tmp32 = tmp26 - tmp31 tmp33 = tmp6.to(tl.float32) tmp34 = tmp5 - tmp33 tmp35 = triton_helpers.maximum(tmp34, tmp4) tmp36 = triton_helpers.minimum(tmp35, tmp2) tmp37 = tmp32 * tmp36 tmp38 = tmp31 + tmp37 tl.store(out_ptr1 + (x7 + (64*x4)), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/he/che6nd6gb4vfumohfsklsm5klcp45g4jlpyy34ahgnzt6epbx2ro.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_4, %primals_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (64*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/p6/cp6kjjnbx6x6vx773fn74my6orskkocw7j5ex7lhq72nsdcqflhu.py # Topologically Sorted Source Nodes: [conv2d, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d => convolution # leaky_relu => gt, mul_5, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_5), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 
3, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [up_x], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(primals_2, buf1, 192, grid=grid(192), stream=stream0) del primals_2 buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, buf2, 64, grid=grid(64), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf4, primals_4, buf5, buf6, 256, grid=grid(256), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf9 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, leaky_relu_1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_6, buf8, buf9, 256, grid=grid(256), stream=stream0) del buf7 del primals_6 return (buf9, primals_3, primals_5, buf3, buf5, buf6, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class UpSample(nn.Sequential):

    def __init__(self, skip_input, output_features):
        super().__init__()
        self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
            stride=1, padding=1)
        self.leakyreluA = nn.LeakyReLU(0.2)
        self.convB = nn.Conv2d(output_features, output_features,
            kernel_size=3, stride=1, padding=1)
        self.leakyreluB = nn.LeakyReLU(0.2)

    def forward(self, x, concat_with):
        # Resize x to concat_with's spatial size, concatenate along the
        # channel dim, then apply two conv + LeakyReLU stages.
        up_x = F.interpolate(x, size=[concat_with.size(2),
            concat_with.size(3)], mode='bilinear', align_corners=True)
        return self.leakyreluB(self.convB(self.leakyreluA(self.convA(
            torch.cat([up_x, concat_with], dim=1)))))


def get_inputs():
    return [torch.rand([4, 3, 4, 4]), torch.rand([4, 1, 4, 4])]


def get_init_inputs():
    return [[], {'skip_input': 4, 'output_features': 4}]
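Because get_inputs() feeds a 4x4 input whose target size is also 4x4, the bilinear resize with align_corners=True is the identity map, which is why the fused interpolation kernel below carries a literal scale factor of 1.0 and its blend weights collapse to zero. A minimal sketch of that identity property (not part of the original record):

import torch
import torch.nn.functional as F

# Equal input/output sizes with align_corners=True map every output pixel
# exactly onto an input pixel, so the resize returns the input unchanged.
x = torch.rand(4, 3, 4, 4)
up = F.interpolate(x, size=[4, 4], mode='bilinear', align_corners=True)
torch.testing.assert_close(up, x)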
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex // 48 x7 = xindex % 48 tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = triton_helpers.minimum(tmp23, tmp2) tmp25 = tmp20 * tmp24 tmp26 = tmp16 + tmp25 tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp29 = tmp28 - tmp27 tmp30 = tmp29 * tmp24 tmp31 = tmp27 + tmp30 tmp32 = tmp26 - tmp31 tmp33 = tmp6.to(tl.float32) tmp34 = tmp5 - tmp33 tmp35 = triton_helpers.maximum(tmp34, tmp4) tmp36 = triton_helpers.minimum(tmp35, tmp2) tmp37 = tmp32 * tmp36 tmp38 = tmp31 + tmp37 tl.store(out_ptr1 + (x7 + 64 * x4), tmp38, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0) get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (192)](primals_2, buf1, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48) triton_poi_fused_cat_1[grid(64)](primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf4, primals_4, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf9 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf7, primals_6, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del primals_6 return buf9, primals_3, primals_5, buf3, buf5, buf6, buf8 class UpSampleNew(nn.Sequential): def __init__(self, skip_input, output_features): super().__init__() self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1) self.leakyreluA = nn.LeakyReLU(0.2) self.convB = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1) self.leakyreluB = nn.LeakyReLU(0.2) def forward(self, input_0, input_1): primals_3 = self.convA.weight primals_4 = self.convA.bias primals_5 = self.convB.weight primals_6 = self.convB.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
BlairLee/dataset-insights
UpSample
false
5190
[ "Apache-2.0" ]
1
892e2ed3a2facf97cfa3a883700830d959a0c49b
https://github.com/BlairLee/dataset-insights/tree/892e2ed3a2facf97cfa3a883700830d959a0c49b
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Sequential):

    def __init__(self, skip_input, output_features):
        super().__init__()
        self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
            stride=1, padding=1)
        self.leakyreluA = nn.LeakyReLU(0.2)
        self.convB = nn.Conv2d(output_features, output_features,
            kernel_size=3, stride=1, padding=1)
        self.leakyreluB = nn.LeakyReLU(0.2)

    def forward(self, x, concat_with):
        up_x = F.interpolate(x, size=[concat_with.size(2),
            concat_with.size(3)], mode='bilinear', align_corners=True)
        return self.leakyreluB(self.convB(self.leakyreluA(self.convA(
            torch.cat([up_x, concat_with], dim=1)))))


def get_inputs():
    return [torch.rand([4, 3, 4, 4]), torch.rand([4, 1, 4, 4])]


def get_init_inputs():
    return [4, 4]
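A hypothetical smoke test for this record, assuming the python_code and triton_code fields above are saved as importable modules eager_upsample.py and triton_upsample.py (made-up filenames) and a CUDA device is available; it copies the eager weights into the compiled wrapper and compares outputs:

import torch
from eager_upsample import UpSample, get_inputs  # hypothetical module names
from triton_upsample import UpSampleNew

torch.manual_seed(0)
eager = UpSample(skip_input=4, output_features=4).cuda()
compiled = UpSampleNew(skip_input=4, output_features=4).cuda()
compiled.load_state_dict(eager.state_dict())  # convA/convB keys match

x, concat_with = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    ref = eager(x, concat_with)
    out = compiled(x, concat_with)
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)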
LastLevelMaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7x/c7xzocag6hze7uuiyz32ow2ikcanvueomksqpljyhexuxldxtjgh.py # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool2d => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') 
tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn


class LastLevelMaxPool(nn.Module):

    def forward(self, x):
        # kernel_size=1, stride=2, padding=0: a 1x1-window max is the
        # element itself, so this is plain stride-2 subsampling.
        return [F.max_pool2d(x, 1, 2, 0)]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class LastLevelMaxPoolNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CV-Rookie/EmbedMask
LastLevelMaxPool
false
5191
[ "MIT" ]
1
3b4d9fb4e0b6112dc501708184ff684dfb45f3f0
https://github.com/CV-Rookie/EmbedMask/tree/3b4d9fb4e0b6112dc501708184ff684dfb45f3f0
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn


class Model(nn.Module):

    def forward(self, x):
        return [F.max_pool2d(x, 1, 2, 0)]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
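Since the pooling window is 1x1, the whole op reduces to pure subsampling, which is why triton_poi_fused_max_pool2d_with_indices_0 above is a single strided load (offset 2 * x0 + 8 * x1) with no max reduction at all. A short equivalence sketch (runs on CPU):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
pooled = F.max_pool2d(x, kernel_size=1, stride=2, padding=0)
subsampled = x[:, :, ::2, ::2]  # every other row and column
torch.testing.assert_close(pooled, subsampled)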
SelfAttentive
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yn/cyn75avd5lflbyn47bvz2yebt4cajzsnfmrfb4lylb7yai22gyi5.py # Topologically Sorted Source Nodes: [hbar], Original ATen: [aten.tanh] # Source node to ATen node mapping: # hbar => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mm,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically 
Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alphas_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alphas_2 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (200, 4), (4, 1)) assert_size_stride(primals_3, (1, 200), (200, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 200), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [hbar], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, 3200, grid=grid(3200), stream=stream0) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (200, 1), (1, 200), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [alphas_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 
4, 1), 0), primals_1, out=buf5) return (buf5, reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0), primals_1, buf1, buf4, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 200), (200, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SelfAttentive(nn.Module):

    def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2):
        super(SelfAttentive, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.ws1 = nn.Linear(hidden_size, att_unit, bias=False)
        self.ws2 = nn.Linear(att_unit, att_hops, bias=False)
        self.tanh = nn.Tanh()
        # dim=1: scores are flattened to (-1, seq_len) before the softmax.
        self.softmax = nn.Softmax(dim=1)
        self.attention_hops = att_hops

    def forward(self, rnn_out, mask=None):
        outp = rnn_out
        size = outp.size()
        compressed_embeddings = outp.reshape(-1, size[2])
        hbar = self.tanh(self.ws1(self.drop(compressed_embeddings)))
        alphas = self.ws2(hbar).view(size[0], size[1], -1)
        alphas = torch.transpose(alphas, 1, 2).contiguous()
        if mask is not None:
            mask = mask.squeeze(2)
            concatenated_mask = [mask for i in range(self.attention_hops)]
            concatenated_mask = torch.cat(concatenated_mask, 1)
            penalized_alphas = alphas + concatenated_mask
        else:
            penalized_alphas = alphas
        alphas = self.softmax(penalized_alphas.view(-1, size[1]))
        alphas = alphas.view(size[0], self.attention_hops, size[1])
        return torch.bmm(alphas, outp), alphas


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (200, 4), (4, 1)) assert_size_stride(primals_3, (1, 200), (200, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 200), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(3200)](buf1, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (200, 1), (1, 200), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), primals_1, out=buf5) return buf5, reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), primals_1, buf1, buf4, primals_3 class SelfAttentiveNew(nn.Module): def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2): super(SelfAttentiveNew, self).__init__() self.drop = nn.Dropout(dropout) self.ws1 = nn.Linear(hidden_size, att_unit, bias=False) self.ws2 = nn.Linear(att_unit, att_hops, bias=False) self.tanh = nn.Tanh() self.softmax = nn.Softmax() self.attention_hops = att_hops def forward(self, input_0): primals_2 = self.ws1.weight primals_3 = self.ws2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
Dio990521/LSTM_emo_classifier
SelfAttentive
false
5192
[ "MIT" ]
1
aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
https://github.com/Dio990521/LSTM_emo_classifier/tree/aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, hidden_size, att_hops=1, att_unit=200, dropout=0.2):
        super().__init__()
        self.drop = nn.Dropout(dropout)
        self.ws1 = nn.Linear(hidden_size, att_unit, bias=False)
        self.ws2 = nn.Linear(att_unit, att_hops, bias=False)
        self.tanh = nn.Tanh()
        # dim=1: scores are flattened to (-1, seq_len) before the softmax.
        self.softmax = nn.Softmax(dim=1)
        self.attention_hops = att_hops

    def forward(self, rnn_out, mask=None):
        outp = rnn_out
        size = outp.size()
        compressed_embeddings = outp.reshape(-1, size[2])
        hbar = self.tanh(self.ws1(self.drop(compressed_embeddings)))
        alphas = self.ws2(hbar).view(size[0], size[1], -1)
        alphas = torch.transpose(alphas, 1, 2).contiguous()
        if mask is not None:
            mask = mask.squeeze(2)
            concatenated_mask = [mask for i in range(self.attention_hops)]
            concatenated_mask = torch.cat(concatenated_mask, 1)
            penalized_alphas = alphas + concatenated_mask
        else:
            penalized_alphas = alphas
        alphas = self.softmax(penalized_alphas.view(-1, size[1]))
        alphas = alphas.view(size[0], self.attention_hops, size[1])
        return torch.bmm(alphas, outp), alphas


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4]
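The two fused kernels in this record are the standard numerically stable softmax split into two passes over each row: subtract the row max and exponentiate, then divide by the row sum. A small eager-mode restatement of what they compute:

import torch

scores = torch.randn(4, 4)
shifted = (scores - scores.amax(dim=1, keepdim=True)).exp()  # kernel 1
alphas = shifted / shifted.sum(dim=1, keepdim=True)          # kernel 2
torch.testing.assert_close(alphas, torch.softmax(scores, dim=1))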
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 
= (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [H], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_energies], Original ATen: [aten.bmm] extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, primals_3, out=buf4) return (buf4, primals_3, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class BaseSelfAttention(nn.Module):

    def __init__(self):
        super(BaseSelfAttention, self).__init__()

    def init_linear(self, input_linear):
        """Initialize linear transformation"""
        bias = np.sqrt(6.0 / (input_linear.weight.size(0) +
            input_linear.weight.size(1)))
        nn.init.uniform_(input_linear.weight, -bias, bias)
        if input_linear.bias is not None:
            input_linear.bias.data.zero_()

    def initialize_layers(self):
        raise NotImplementedError

    def forward(self, X):
        raise NotImplementedError

    def score(self, a, b):
        raise NotImplementedError


class SelfAttention(BaseSelfAttention):

    def __init__(self, hidden_dim, scoring='general'):
        super(SelfAttention, self).__init__()
        self.scoring = scoring
        self.hidden_dim = hidden_dim
        self.initialize_layers()

    def initialize_layers(self):
        if self.scoring == 'general':
            self.W = nn.Linear(self.hidden_dim, self.hidden_dim)
            self.init_linear(self.W)
        elif self.scoring == 'concat':
            self.W = nn.Linear(2 * self.hidden_dim, self.hidden_dim)
            self.v = nn.Linear(self.hidden_dim, 1)
            self.init_linear(self.W)
            self.init_linear(self.v)
        elif self.scoring == 'dot':
            pass
        else:
            raise RuntimeError('Unrecognized attention scoring method: %s' %
                self.scoring)

    def forward(self, hidden_outputs):
        scores = self.score(hidden_outputs)
        context = scores.bmm(hidden_outputs)
        return context

    def score(self, hidden_outputs):
        if self.scoring == 'dot':
            H = hidden_outputs.transpose(1, 2)
            attention_energies = hidden_outputs.bmm(H)
            scores = F.softmax(attention_energies, dim=2)
            return scores
        elif self.scoring == 'general':
            H = self.W(hidden_outputs)
            H = H.transpose(1, 2)
            attention_energies = hidden_outputs.bmm(H)
            scores = F.softmax(attention_energies, dim=2)
            return scores
        elif self.scoring == 'concat':
            H = hidden_outputs.transpose(1, 2)
            scores = []
            batch_size, doc_maxlen, hidden_dim = hidden_outputs.shape
            for doc_idx in range(H.shape[-1]):
                h_t = hidden_outputs[:, doc_idx, :]
                h_t = h_t.unsqueeze(1)
                h_t = h_t.repeat(1, doc_maxlen, 1)
                H_t = torch.cat((h_t, hidden_outputs), dim=2)
                H_t = self.W(H_t)
                H_t = torch.tanh(H_t)
                H_t = self.v(H_t)
                H_t = H_t.view(batch_size, doc_maxlen)
                scores.append(H_t)
            scores = torch.stack(scores)
            scores = scores.transpose(0, 1)
            # Caution: torch.Tensor([...]) lives on CPU and will raise
            # against CUDA-resident scores.
            scores = scores / torch.sqrt(torch.Tensor([hidden_dim]))
            scores = F.softmax(scores, dim=2)
            return scores
        else:
            raise RuntimeError('Unrecognized scoring method: %s' %
                self.scoring)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), ( 16, 1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 extern_kernels.bmm(buf3, primals_3, out=buf4) return buf4, primals_3, buf3 class BaseSelfAttention(nn.Module): def __init__(self): super(BaseSelfAttention, self).__init__() def init_linear(self, input_linear): """Initialize linear transformation""" bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear. 
weight.size(1))) nn.init.uniform_(input_linear.weight, -bias, bias) if input_linear.bias is not None: input_linear.bias.data.zero_() def initialize_layers(self): raise NotImplementedError def forward(self, X): raise NotImplementedError def score(self, a, b): raise NotImplementedError class SelfAttentionNew(BaseSelfAttention): def __init__(self, hidden_dim, scoring='general'): super(SelfAttentionNew, self).__init__() self.scoring = scoring self.hidden_dim = hidden_dim self.initialize_layers() def initialize_layers(self): if self.scoring == 'general': self.W = nn.Linear(self.hidden_dim, self.hidden_dim) self.init_linear(self.W) elif self.scoring == 'concat': self.W = nn.Linear(2 * self.hidden_dim, self.hidden_dim) self.v = nn.Linear(self.hidden_dim, 1) self.init_linear(self.W) self.init_linear(self.v) elif self.scoring == 'dot': pass else: raise RuntimeError('Unrecognized attention scoring method: %s' % self.scoring) def score(self, hidden_outputs): if self.scoring == 'dot': H = hidden_outputs.transpose(1, 2) attention_energies = hidden_outputs.bmm(H) scores = F.softmax(attention_energies, dim=2) return scores elif self.scoring == 'general': H = self.W(hidden_outputs) H = H.transpose(1, 2) attention_energies = hidden_outputs.bmm(H) scores = F.softmax(attention_energies, dim=2) return scores elif self.scoring == 'concat': H = hidden_outputs.transpose(1, 2) scores = [] batch_size, doc_maxlen, hidden_dim = hidden_outputs.shape for doc_idx in range(H.shape[-1]): h_t = hidden_outputs[:, doc_idx, :] h_t = h_t.unsqueeze(1) h_t = h_t.repeat(1, doc_maxlen, 1) H_t = torch.cat((h_t, hidden_outputs), dim=2) H_t = self.W(H_t) H_t = torch.nn.functional.tanh(H_t) H_t = self.v(H_t) H_t = H_t.view(batch_size, doc_maxlen) scores.append(H_t) scores = torch.stack(scores) scores = scores.transpose(0, 1) scores = scores / torch.sqrt(torch.Tensor([hidden_dim])) scores = F.softmax(scores, dim=2) return scores else: raise RuntimeError('Unrecognized scoring method: %s' % self.scoring ) def forward(self, input_0): primals_1 = self.W.weight primals_2 = self.W.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Gan-Tu/ganutils
SelfAttention
false
5193
[ "MIT" ]
1
203c703cbba0345f9cfe23b03e1e3981f03e43db
https://github.com/Gan-Tu/ganutils/tree/203c703cbba0345f9cfe23b03e1e3981f03e43db
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class BaseSelfAttention(nn.Module):

    def __init__(self):
        super().__init__()

    def init_linear(self, input_linear):
        """Initialize linear transformation"""
        bias = np.sqrt(6.0 / (input_linear.weight.size(0) +
            input_linear.weight.size(1)))
        nn.init.uniform_(input_linear.weight, -bias, bias)
        if input_linear.bias is not None:
            input_linear.bias.data.zero_()

    def initialize_layers(self):
        raise NotImplementedError

    def forward(self, X):
        raise NotImplementedError

    def score(self, a, b):
        raise NotImplementedError


class Model(BaseSelfAttention):

    def __init__(self, hidden_dim, scoring='general'):
        super().__init__()
        self.scoring = scoring
        self.hidden_dim = hidden_dim
        self.initialize_layers()

    def initialize_layers(self):
        if self.scoring == 'general':
            self.W = nn.Linear(self.hidden_dim, self.hidden_dim)
            self.init_linear(self.W)
        elif self.scoring == 'concat':
            self.W = nn.Linear(2 * self.hidden_dim, self.hidden_dim)
            self.v = nn.Linear(self.hidden_dim, 1)
            self.init_linear(self.W)
            self.init_linear(self.v)
        elif self.scoring == 'dot':
            pass
        else:
            raise RuntimeError('Unrecognized attention scoring method: %s' %
                self.scoring)

    def forward(self, hidden_outputs):
        scores = self.score(hidden_outputs)
        context = scores.bmm(hidden_outputs)
        return context

    def score(self, hidden_outputs):
        if self.scoring == 'dot':
            H = hidden_outputs.transpose(1, 2)
            attention_energies = hidden_outputs.bmm(H)
            scores = F.softmax(attention_energies, dim=2)
            return scores
        elif self.scoring == 'general':
            H = self.W(hidden_outputs)
            H = H.transpose(1, 2)
            attention_energies = hidden_outputs.bmm(H)
            scores = F.softmax(attention_energies, dim=2)
            return scores
        elif self.scoring == 'concat':
            H = hidden_outputs.transpose(1, 2)
            scores = []
            batch_size, doc_maxlen, hidden_dim = hidden_outputs.shape
            for doc_idx in range(H.shape[-1]):
                h_t = hidden_outputs[:, doc_idx, :]
                h_t = h_t.unsqueeze(1)
                h_t = h_t.repeat(1, doc_maxlen, 1)
                H_t = torch.cat((h_t, hidden_outputs), dim=2)
                H_t = self.W(H_t)
                H_t = torch.tanh(H_t)
                H_t = self.v(H_t)
                H_t = H_t.view(batch_size, doc_maxlen)
                scores.append(H_t)
            scores = torch.stack(scores)
            scores = scores.transpose(0, 1)
            # Caution: torch.Tensor([...]) lives on CPU and will raise
            # against CUDA-resident scores.
            scores = scores / torch.sqrt(torch.Tensor([hidden_dim]))
            scores = F.softmax(scores, dim=2)
            return scores
        else:
            raise RuntimeError('Unrecognized scoring method: %s' %
                self.scoring)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4]
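Only the 'general' scoring branch is captured by the traced call() above (one addmm for W, two bmm calls, and the fused softmax pair); the 'dot' and 'concat' branches never appear in the compiled graph. A plain-PyTorch sketch of that branch, using the record's 4x4x4 shapes:

import torch
import torch.nn as nn

B, T, D = 4, 4, 4
h = torch.rand(B, T, D)
W = nn.Linear(D, D)
energies = h.bmm(W(h).transpose(1, 2))   # addmm + first bmm in call()
scores = torch.softmax(energies, dim=2)  # the two fused softmax kernels
context = scores.bmm(h)                  # final bmm
assert context.shape == (B, T, D)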
ArcMarginProduct
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 
= tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ce/cceitkb57gvjhg5ldpl5r7iv6yefxb7tczymuq3ptw4ifiklh2os.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source 
Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
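A note on the mm above: reinterpret_tensor(buf1, (4, 4), (1, 4), 0) hands the normalized weight to the GEMM as a zero-copy transpose, by swapping its strides instead of materializing w.t(). A minimal sketch of the same trick with torch.as_strided (variable names are illustrative):

import torch

w = torch.arange(16.0).view(4, 4)
# Swapping the strides of a contiguous (4, 4) tensor yields its transpose
# without copying, which is what reinterpret_tensor(buf1, (4, 4), (1, 4), 0) does.
wt = torch.as_strided(w, size=(4, 4), stride=(1, 4))
assert torch.equal(wt, w.t())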
import math
import torch
from torch import nn
from torch.nn import functional as F


class ArcMarginProduct(nn.Module):
    """
    Process the latent vectors to output the cosine vector
    for the follow-up ArcFaceLoss computation.

    Args:
        in_features: the column dimension of the weights,
            which is identical to the dim of latent vectors.
        out_features: the row dimension of the weights,
            which is identical to the number of classes.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features,
            in_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, features):
        cosine = F.linear(F.normalize(features), F.normalize(self.weight))
        return cosine


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class ArcMarginProductNew(nn.Module): """ Process the latent vectors to output the cosine vector for the follow-up ArcFaceLoss computation. Args: in_features: the column dimension of the weights, which is identical to the dim of latent vectors. out_features: the row dimension of the weights, which is identical to the number of classes. 
""" def __init__(self, in_features, out_features): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features) ) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
CTPLab/IID_representation_learning
ArcMarginProduct
false
5,194
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
import math
import torch
from torch import nn
from torch.nn import functional as F


class Model(nn.Module):
    """
    Process the latent vectors to output the cosine vector
    for the follow-up ArcFaceLoss computation.

    Args:
        in_features: the column dimension of the weights,
            which is identical to the dim of latent vectors.
        out_features: the row dimension of the weights,
            which is identical to the number of classes.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(out_features,
            in_features))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, features):
        cosine = F.linear(F.normalize(features), F.normalize(self.weight))
        return cosine


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
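One detail worth noting: the maximum(sqrt(...), 1e-12) clamp in the fused kernel is F.normalize's default eps guard against zero-norm rows. A small standalone check of that equivalence, using the same (4, 4, 4, 4) shape as get_inputs():

import torch
from torch.nn import functional as F

x = torch.rand(4, 4, 4, 4)
# F.normalize divides by the norm clamped at eps=1e-12, exactly the
# maximum(sqrt(sum(x**2)), 1e-12) computed per position in the Triton kernel.
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
assert torch.allclose(x / norm, F.normalize(x, p=2, dim=1))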
RingLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vj/cvjonhukcg5istyp2vzj3xmwqctpymw4xnydshvqt5dmspsjso3r.py # Topologically Sorted Source Nodes: [norm, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => mean # norm => pow_1, pow_2, sum_1 # pow_1 => pow_3 # sub => sub # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %primals_2), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 1.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_4, 2.0), kwargs = {}) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (0)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [norm, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0.run(buf3, primals_1, primals_2, buf2, 1, 64, grid=grid(1), stream=stream0) del primals_1 del primals_2 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
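Written out eagerly, the single persistent-reduction program above computes the loss and, as a side output, 2 * (norm - radius), the residual that the backward pass reuses (the %mul node with scalar 2.0). A sketch under the same (4, 4, 4, 4) input assumption:

import torch

x = torch.rand(4, 4, 4, 4)
radius = torch.ones(1)
norms = x.pow(2).sum(dim=1).sqrt()        # tmp11: one norm per (n, h, w) position, 64 in all
loss = ((norms - radius) ** 2).mean()     # tmp20 summed then divided by 64.0
grad_residual = 2.0 * (norms - radius)    # tmp16: saved for the backward pass
assert loss.shape == () and grad_residual.shape == (4, 4, 4)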
import torch import warnings import torch.nn as nn from torchvision.transforms import * class RingLoss(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super(RingLoss, self).__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, x): loss = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import warnings import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3, primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf3, buf2 class RingLossNew(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super(RingLossNew, self).__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, input_0): primals_2 = self.radius primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
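A minimal smoke test for the compiled wrapper, assuming a CUDA device and the definitions above in scope; instantiation fires the module's own deprecation warning:

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    m = RingLossNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = ((x.norm(p=2, dim=1) - m.radius) ** 2).mean()
    assert torch.allclose(m(x), ref)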
DRACOyu/deep-person-reid
RingLoss
false
5,195
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
import torch import warnings import torch.nn as nn from torchvision.transforms import * class Model(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super().__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, x): loss = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
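A quick sanity check on the eager Model above, with an illustrative hand-built input: rows of unit L2 norm match the initial radius of 1.0, so the loss is exactly zero.

import torch

m = Model()  # radius is initialized to 1.0 (and a deprecation warning fires)
x = torch.zeros(2, 3)
x[:, 0] = 1.0  # every row has unit L2 norm
assert torch.allclose(m(x), torch.zeros(()))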
HardAttn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ow/cowjexxxal2b7gsm3dt4uy42yticfka2kv6a5zlnecjnm6klax5t.py # Topologically Sorted Source Nodes: [theta], Original ATen: [aten.tanh, aten.tanh_backward] # Source node to ATen node mapping: # theta => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {}) triton_poi_fused_tanh_tanh_backward_1 = async_compile.triton('triton_poi_fused_tanh_tanh_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_tanh_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + (x2), tmp3, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf1) del primals_2 buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [theta], Original ATen: [aten.tanh, aten.tanh_backward] triton_poi_fused_tanh_tanh_backward_1.run(buf2, primals_3, buf3, 32, grid=grid(32), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 4, 2), (8, 2, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
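The pooling kernel above simply sums the 16 spatial values of each feature map and scales by 0.0625 = 1/16, i.e. a global average over the full 4x4 extent. A one-line check of that reading:

import torch
from torch.nn import functional as F

x = torch.rand(4, 4, 4, 4)
pooled = F.avg_pool2d(x, x.size()[2:])  # one 4x4 window per feature map
assert torch.allclose(pooled.view(4, 4), x.mean(dim=(2, 3)))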
import torch import torch.nn as nn from torch.nn import functional as F from torchvision.transforms import * class HardAttn(nn.Module): """Hard Attention (Sec. 3.1.II)""" def __init__(self, in_channels): super(HardAttn, self).__init__() self.fc = nn.Linear(in_channels, 4 * 2) self.init_params() def init_params(self): self.fc.weight.data.zero_() self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float)) def forward(self, x): x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1)) theta = torch.tanh(self.fc(x)) theta = theta.view(-1, 4, 2) return theta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 
1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_1[grid(32)](buf2, primals_3, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 2), (8, 2, 1), 0 ), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf3 class HardAttnNew(nn.Module): """Hard Attention (Sec. 3.1.II)""" def __init__(self, in_channels): super(HardAttnNew, self).__init__() self.fc = nn.Linear(in_channels, 4 * 2) self.init_params() def init_params(self): self.fc.weight.data.zero_() self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float)) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
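A minimal smoke test for the compiled wrapper, assuming a CUDA device and the definitions above in scope; the eager reference reuses the module's own fc layer:

import torch
from torch.nn import functional as F

if torch.cuda.is_available():
    m = HardAttnNew(4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    pooled = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1))
    ref = torch.tanh(m.fc(pooled)).view(-1, 4, 2)
    assert torch.allclose(m(x), ref, atol=1e-6)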
DRACOyu/deep-person-reid
HardAttn
false
5,196
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
import torch import torch.nn as nn from torch.nn import functional as F from torchvision.transforms import * class Model(nn.Module): """Hard Attention (Sec. 3.1.II)""" def __init__(self, in_channels): super().__init__() self.fc = nn.Linear(in_channels, 4 * 2) self.init_params() def init_params(self): self.fc.weight.data.zero_() self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float)) def forward(self, x): x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1)) theta = torch.tanh(self.fc(x)) theta = theta.view(-1, 4, 2) return theta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
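Because init_params zeroes the weights, the initial theta is tanh(bias) regardless of the input; a small demonstration (the batch size 2 is arbitrary):

import torch

m = Model(4)
x = torch.rand(2, 4, 4, 4)
theta = m(x)  # shape (2, 4, 2)
# With zeroed weights, fc(x) is just the bias, so every sample gets the
# same four pairs from tanh of [0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75].
expected = torch.tanh(m.fc.bias).view(4, 2)
assert torch.allclose(theta, expected.expand(2, 4, 2))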
BertSelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mixed_query_layer], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mixed_key_layer], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mixed_value_layer], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf6) return (reinterpret_tensor(buf6, (4, 4), (16, 1), 0), buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
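The three addmm calls above fold the bias into the GEMM with alpha=beta=1, which is exactly F.linear on the flattened (16, 4) input. A minimal standalone sketch of that identity:

import torch
from torch.nn import functional as F

x = torch.rand(16, 4)
w = torch.rand(4, 4)
b = torch.rand(4)
# addmm(b, x, w.t()) computes b + x @ w.t(), i.e. F.linear(x, w, b).
assert torch.allclose(torch.addmm(b, x, w.t()), F.linear(x, w, b))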
import math import torch import torch.nn as nn from sklearn.metrics import * def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. """ batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class BertSelfAttention(nn.Module): """ Extracted from """ def __init__(self, hidden_size): super(BertSelfAttention, self).__init__() self.num_attention_heads = 1 self.attention_head_size = int(hidden_size / self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(hidden_size, self.all_head_size) self.key = nn.Linear(hidden_size, self.all_head_size) self.value = nn.Linear(hidden_size, self.all_head_size) self.dropout = nn.Dropout(0.2) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: if len(attention_mask.size()) < 2: attention_mask = sequence_mask(attention_mask) reverse_mask = torch.ones(attention_mask.size()) reverse_mask[attention_mask] = 0.0 attention_scores = attention_scores + reverse_mask.unsqueeze(1 ).unsqueeze(2) * -1000000000.0 else: raise NotImplementedError attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer[:, 0, :], attention_probs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf6) return reinterpret_tensor(buf6, (4, 4), (16, 1), 0 ), buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. """ batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class BertSelfAttentionNew(nn.Module): """ Extracted from """ def __init__(self, hidden_size): super(BertSelfAttentionNew, self).__init__() self.num_attention_heads = 1 self.attention_head_size = int(hidden_size / self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(hidden_size, self.all_head_size) self.key = nn.Linear(hidden_size, self.all_head_size) self.value = nn.Linear(hidden_size, self.all_head_size) self.dropout = nn.Dropout(0.2) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
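A minimal smoke test, assuming a CUDA device and the definitions above in scope. The compiled graph corresponds to the attention_mask=None path and contains no dropout node, so the eager reference below skips dropout as well; with a single head of size 4, the 1/sqrt(d) scaling is the 0.5 seen inside the softmax kernel:

import math
import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    m = BertSelfAttentionNew(4).cuda()
    h = torch.rand(4, 4, 4, device='cuda')
    q, k, v = m.query(h), m.key(h), m.value(h)
    probs = torch.softmax(q @ k.transpose(-1, -2) / math.sqrt(4), dim=-1)
    out, attn = m(h)
    assert torch.allclose(out, (probs @ v)[:, 0, :], atol=1e-6)
    assert torch.allclose(attn.squeeze(1), probs, atol=1e-6)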
Dio990521/LSTM_emo_classifier
BertSelfAttention
false
5,197
[ "MIT" ]
1
aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
https://github.com/Dio990521/LSTM_emo_classifier/tree/aaf2bf2d6a3e60c1acfcff5b82ab256f86ba0dbc
import math import torch import torch.nn as nn from sklearn.metrics import * def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. """ batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class Model(nn.Module): """ Extracted from """ def __init__(self, hidden_size): super().__init__() self.num_attention_heads = 1 self.attention_head_size = int(hidden_size / self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(hidden_size, self.all_head_size) self.key = nn.Linear(hidden_size, self.all_head_size) self.value = nn.Linear(hidden_size, self.all_head_size) self.dropout = nn.Dropout(0.2) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: if len(attention_mask.size()) < 2: attention_mask = sequence_mask(attention_mask) reverse_mask = torch.ones(attention_mask.size()) reverse_mask[attention_mask] = 0.0 attention_scores = attention_scores + reverse_mask.unsqueeze(1 ).unsqueeze(2) * -1000000000.0 else: raise NotImplementedError attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer[:, 0, :], attention_probs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
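The fused softmax kernel applies the 1/sqrt(attention_head_size) factor after subtracting the row max (tmp15 = 0.5), which is mathematically identical to softmax(scores / sqrt(d)). A standalone check of that identity:

import math
import torch

s = torch.randn(4, 4)
a = torch.softmax(s / math.sqrt(4), dim=-1)
# exp((s - max(s)) / sqrt(d)) normalized row-wise equals softmax(s / sqrt(d)),
# since the shared max term cancels in the ratio.
b = torch.exp((s - s.amax(dim=-1, keepdim=True)) / math.sqrt(4))
assert torch.allclose(a, b / b.sum(dim=-1, keepdim=True), atol=1e-6)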
AMCLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [X], Original ATen: [aten.div] # Source node to ATen node mapping: # X => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tw/ctwkw35qgrxnplwjus5xpzkjda4uxgj3ectshin3uc6hmrbxdxt6.py # Topologically Sorted Source Nodes: [softmax, max_1], 
Original ATen: [aten._softmax, aten.max] # Source node to ATen node mapping: # max_1 => max_1 # softmax => div_1, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div_1, 1), kwargs = {}) triton_poi_fused__softmax_max_2 = async_compile.triton('triton_poi_fused__softmax_max_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp8 = tmp1 / tmp6 tmp9 = tmp7 > tmp8 tmp10 = tmp7 == tmp8 tmp11 = tmp7 != tmp7 tmp12 = tmp8 != tmp8 tmp13 = tmp11 > tmp12 tmp14 = tmp9 | tmp13 tmp15 = tmp11 & tmp12 tmp16 = tmp10 | tmp15 tmp17 = tl.full([1], 0, tl.int64) tmp18 = tl.full([1], 1, tl.int64) tmp19 = tmp17 < tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp14 | tmp20 tmp22 = tl.where(tmp21, tmp7, tmp8) tmp23 = tl.where(tmp21, tmp17, tmp18) tmp24 = tmp3 / tmp6 tmp25 = tmp22 > tmp24 tmp26 = tmp22 == tmp24 tmp27 = tmp22 != tmp22 tmp28 = tmp24 != tmp24 tmp29 = tmp27 > tmp28 tmp30 = tmp25 | tmp29 tmp31 = tmp27 & tmp28 tmp32 = tmp26 | tmp31 tmp33 = tl.full([1], 2, tl.int64) tmp34 = tmp23 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tmp30 | tmp35 tmp37 = tl.where(tmp36, tmp22, tmp24) tmp38 = tl.where(tmp36, tmp23, tmp33) tmp39 = tmp5 / tmp6 tmp40 = tmp37 > tmp39 tmp41 = tmp37 == tmp39 tmp42 = tmp37 != tmp37 tmp43 = tmp39 != tmp39 tmp44 = tmp42 > tmp43 tmp45 = tmp40 | tmp44 tmp46 = tmp42 & tmp43 tmp47 = tmp41 | tmp46 tmp48 = tl.full([1], 3, tl.int64) tmp49 = tmp38 < tmp48 tmp50 = tmp47 & tmp49 tmp51 = 
tmp45 | tmp50 tmp52 = tl.where(tmp51, tmp37, tmp39) tmp53 = tl.where(tmp51, tmp38, tmp48) tl.store(out_ptr0 + (x2), tmp53, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ld/cldoapnebb2yhaanl4ibvt7hg5txag3xsrmgntown2nx2tizfox4.py # Topologically Sorted Source Nodes: [neighbor_bool, mul, inner, clamp, acos, geo_desic, square, sub, relu, square_1, where, geo_losses, mean], Original ATen: [aten.eq, aten.mul, aten.sum, aten.clamp, aten.acos, aten.pow, aten.rsub, aten.relu, aten.where, aten.mean] # Source node to ATen node mapping: # acos => acos # clamp => clamp_max, clamp_min_1 # geo_desic => mul_1 # geo_losses => clamp_min_2 # inner => sum_3 # mean => mean # mul => mul # neighbor_bool => eq # relu => relu # square => pow_3 # square_1 => pow_4 # sub => sub_1 # where => where # Graph fragment: # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%slice_1, %slice_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_3, %slice_4), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_3, -1e-07), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1e-07), kwargs = {}) # %acos : [num_users=1] = call_function[target=torch.ops.aten.acos.default](args = (%clamp_max,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%acos, 1.0), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_1, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.5, %mul_1), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %pow_3, %pow_4), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%where, 1e-12), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_2,), kwargs = {}) triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3 = async_compile.triton('triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 32 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr0 + (128 + r0 + (64*r1)), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp4 = tl.load(in_ptr0 + (144 + r0 + (64*r1)), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (160 + r0 + (64*r1)), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr0 + (176 + r0 + (64*r1)), None) tmp22 = tl.load(in_ptr1 + (r2), None) tmp23 = tl.load(in_ptr1 + (32 + r2), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -1e-07 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = 1e-07 tmp18 = triton_helpers.minimum(tmp16, tmp17) tmp19 = libdevice.acos(tmp18) tmp20 = 1.0 tmp21 = tmp19 * tmp20 tmp24 = tmp22 == tmp23 tmp25 = tmp21 * tmp21 tmp26 = 0.5 tmp27 = tmp26 - tmp21 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = tmp29 * tmp29 tmp31 = tl.where(tmp24, tmp25, tmp30) tmp32 = 1e-12 tmp33 = triton_helpers.maximum(tmp31, tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp37 = 32.0 tmp38 = tmp36 / tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [X], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wf], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: 
[softmax, max_1], Original ATen: [aten._softmax, aten.max] triton_poi_fused__softmax_max_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [neighbor_bool, mul, inner, clamp, acos, geo_desic, square, sub, relu, square_1, where, geo_losses, mean], Original ATen: [aten.eq, aten.mul, aten.sum, aten.clamp, aten.acos, aten.pow, aten.rsub, aten.relu, aten.where, aten.mean] triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3.run(buf6, buf0, buf3, 1, 32, grid=grid(1), stream=stream0) del buf3 return (buf6, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
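For intuition, the comparison chain in triton_poi_fused__softmax_max_2 above is an unrolled argmax over the four softmax values. Below is a scalar restatement of one reduction step; the helper name and structure are mine, not from the source.

def keep_left(a, ia, b, ib):
    # Keep (a, ia) over (b, ib) iff a is strictly greater, or a is NaN while b
    # is not (NaN propagates, as in torch.max), or the two tie (equal, or both
    # NaN) and ia is the smaller index.
    a_nan, b_nan = a != a, b != b
    wins = (a > b) or (a_nan and not b_nan)
    ties = (a == b) or (a_nan and b_nan)
    return wins or (ties and ia < ib)

# Folding four values with this rule yields the index the kernel stores (tmp53).
assert keep_left(0.5, 0, 0.5, 1)      # equal values: lower index wins
assert not keep_left(0.1, 0, 0.9, 1)  # strictly larger value wins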
import torch import torch.nn as nn import torch.nn.functional as F class AMCLoss(nn.Module): def __init__(self, in_features, out_features, s=None, m=None, device='cuda'): """ Angular Margin Contrastive Loss https://arxiv.org/pdf/2004.09805.pdf Code converted over from Tensorflow to Pytorch """ super(AMCLoss, self).__init__() self.m = 0.5 if m is None else m self.s = 1.0 if s is None else s self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) self.device = device def forward(self, X, labels=None): """ input shape (N, in_features) """ X = F.normalize(X, p=2, dim=1) batch_size = X.shape[0] wf = self.fc(X) half = int(batch_size / 2) _, target_hard = torch.max(F.softmax(wf, dim=1), 1) try: neighbor_bool = torch.eq(target_hard[:half], target_hard[half:]) inner = torch.sum(X[:half] * X[half:], axis=1) except RuntimeError: neighbor_bool = torch.eq(target_hard[:half + 1], target_hard[half:]) inner = torch.sum(X[:half + 1] * X[half:], axis=1) geo_desic = torch.acos(torch.clamp(inner, -1e-07, 1e-07)) * self.s geo_losses = torch.where(neighbor_bool, torch.square(geo_desic), torch.square(F.relu(self.m - geo_desic))).clamp(min=1e-12) return torch.mean(geo_losses), wf def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
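A minimal eager-mode sanity check of AMCLoss as defined above; shapes are illustrative, and an even batch keeps the try-branch. Note that the clamp in the source restricts inner to [-1e-07, 1e-07], so acos stays near pi/2; the compiled kernels below implement that same clamp.

import torch

criterion = AMCLoss(in_features=4, out_features=4)
X = torch.rand(8, 4)   # (N, in_features), N even
loss, wf = criterion(X)
assert loss.dim() == 0 and wf.shape == (8, 4)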
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp8 = tmp1 / tmp6 tmp9 = tmp7 > tmp8 tmp10 = tmp7 == tmp8 tmp11 = tmp7 != tmp7 tmp12 = tmp8 != tmp8 tmp13 = tmp11 > tmp12 tmp14 = tmp9 | tmp13 tmp15 = tmp11 & tmp12 tmp16 = tmp10 | tmp15 tmp17 = tl.full([1], 0, tl.int64) tmp18 = tl.full([1], 1, tl.int64) tmp19 = tmp17 < tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp14 | tmp20 tmp22 = tl.where(tmp21, tmp7, tmp8) tmp23 = tl.where(tmp21, tmp17, tmp18) tmp24 = tmp3 / tmp6 tmp25 = tmp22 > tmp24 tmp26 = tmp22 == tmp24 tmp27 = tmp22 != tmp22 tmp28 = tmp24 != tmp24 tmp29 = tmp27 > tmp28 tmp30 = tmp25 | tmp29 tmp31 = tmp27 & tmp28 tmp32 = tmp26 | tmp31 tmp33 = tl.full([1], 2, tl.int64) tmp34 = tmp23 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tmp30 | tmp35 tmp37 = tl.where(tmp36, tmp22, tmp24) tmp38 = tl.where(tmp36, tmp23, tmp33) tmp39 = tmp5 / tmp6 tmp40 = tmp37 > tmp39 tmp41 = tmp37 == tmp39 tmp42 = tmp37 != tmp37 tmp43 = tmp39 != tmp39 tmp44 = tmp42 > tmp43 tmp45 = tmp40 | tmp44 tmp46 = tmp42 & tmp43 tmp47 = tmp41 | tmp46 tmp48 = tl.full([1], 3, tl.int64) tmp49 = tmp38 < tmp48 tmp50 = tmp47 & tmp49 tmp51 = tmp45 | tmp50 tl.where(tmp51, tmp37, tmp39) tmp53 = tl.where(tmp51, tmp38, tmp48) tl.store(out_ptr0 + x2, tmp53, xmask) @triton.jit def triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (128 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (144 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (160 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr0 + (176 + r0 + 64 * r1), None) tmp22 = tl.load(in_ptr1 + r2, None) tmp23 = tl.load(in_ptr1 + (32 + r2), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -1e-07 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = 1e-07 tmp18 = triton_helpers.minimum(tmp16, tmp17) tmp19 = libdevice.acos(tmp18) tmp20 = 1.0 tmp21 = tmp19 * tmp20 tmp24 = tmp22 == tmp23 tmp25 = tmp21 * tmp21 tmp26 = 0.5 tmp27 = tmp26 - tmp21 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = tmp29 * tmp29 tmp31 = tl.where(tmp24, tmp25, tmp30) tmp32 = 1e-12 tmp33 = triton_helpers.maximum(tmp31, tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp37 = 32.0 tmp38 = tmp36 / tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused__softmax_max_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 triton_per_fused_acos_clamp_eq_mean_mul_pow_relu_rsub_sum_where_3[grid (1)](buf6, buf0, buf3, 1, 32, XBLOCK=1, num_warps=2, num_stages=1) del buf3 return buf6, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class AMCLossNew(nn.Module): def __init__(self, in_features, out_features, s=None, m=None, device='cuda' ): """ Angular Margin Contrastive Loss https://arxiv.org/pdf/2004.09805.pdf Code converted over from Tensorflow to Pytorch """ super(AMCLossNew, self).__init__() self.m = 0.5 if not m else m self.s = 1.0 if not s else s self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) self.device = device def forward(self, input_0): primals_2 = self.fc.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
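A parity-check sketch, assuming a CUDA device and both AMCLoss and AMCLossNew from this record in scope; after sharing weights, the compiled wrapper should agree with the eager loss up to float tolerance.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = AMCLoss(4, 4).cuda()
fused = AMCLossNew(4, 4).cuda()
fused.fc.load_state_dict(eager.fc.state_dict())  # identical projection weights
loss_ref, wf_ref = eager(x)
loss_new, wf_new = fused(x)
assert torch.allclose(loss_ref, loss_new, atol=1e-6)
assert torch.allclose(wf_ref, wf_new.view_as(wf_ref), atol=1e-6)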
GatorSense/LACE
AMCLoss
false
5198
[ "MIT" ]
1
ee8194bc443886642f22c2317f5bdef23bba5147
https://github.com/GatorSense/LACE/tree/ee8194bc443886642f22c2317f5bdef23bba5147
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_features, out_features, s=None, m=None, device='cuda'): """ Angular Margin Contrastive Loss https://arxiv.org/pdf/2004.09805.pdf Code converted over from Tensorflow to Pytorch """ super().__init__() self.m = 0.5 if m is None else m self.s = 1.0 if s is None else s self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) self.device = device def forward(self, X, labels=None): """ input shape (N, in_features) """ X = F.normalize(X, p=2, dim=1) batch_size = X.shape[0] wf = self.fc(X) half = int(batch_size / 2) _, target_hard = torch.max(F.softmax(wf, dim=1), 1) try: neighbor_bool = torch.eq(target_hard[:half], target_hard[half:]) inner = torch.sum(X[:half] * X[half:], axis=1) except RuntimeError: neighbor_bool = torch.eq(target_hard[:half + 1], target_hard[half:]) inner = torch.sum(X[:half + 1] * X[half:], axis=1) geo_desic = torch.acos(torch.clamp(inner, -1e-07, 1e-07)) * self.s geo_losses = torch.where(neighbor_bool, torch.square(geo_desic), torch.square(F.relu(self.m - geo_desic))).clamp(min=1e-12) return torch.mean(geo_losses), wf def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
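The F.normalize call in the model above is what triton_poi_fused_div_0 implements: divide by the dim-1 L2 norm clamped below at 1e-12. An eager restatement, with illustrative tensor values:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
manual = x / x.norm(p=2, dim=1, keepdim=True).clamp_min(1e-12)
assert torch.allclose(F.normalize(x, p=2, dim=1), manual)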
AvgPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/pr/cprzlfpjjqlj6tudvbc455jxno35xlnta4wgmkbc6uo5zmcxii4s.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => avg_pool2d # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], False, False), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))) + (((5) * 
((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + (x4), tmp93, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4b/c4bq4zy7b5su4xfunkxh3zumzbbjajtzrtchq4xnyu5u7efma7by.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 2 x2 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (3*x1) + (9*x2)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) del buf0 return (buf1, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
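The long tmp92 expression in triton_poi_fused_avg_pool2d_constant_pad_nd_0 above reduces to the area of the 3x3 pooling window clipped to the 5x5 padded input, which is the divisor that count_include_pad=False requires. A plain-Python restatement; the helper name is mine.

def clipped_window_area(x0, x1, size=5):
    # Window at output (x0, x1) spans rows/cols 2*k - 1 .. 2*k + 1, clipped to [0, size).
    a0, b0 = max(0, 2 * x0 - 1), min(size, 2 * x0 + 2)
    a1, b1 = max(0, 2 * x1 - 1), min(size, 2 * x1 + 2)
    return (b0 - a0) * (b1 - a1)

assert clipped_window_area(0, 0) == 4  # corner: only 2x2 cells are in bounds
assert clipped_window_area(1, 1) == 9  # interior: the full 3x3 window counts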
import torch import torch.nn as nn from torchvision.transforms import * class AvgPoolPad(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
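Shape arithmetic for the module above on the test input: ZeroPad2d((1, 0, 1, 0)) takes the 4x4 spatial extent to 5x5, AvgPool2d(3, stride=2, padding=1) gives floor((5 + 2 - 3) / 2) + 1 = 3, and the [1:, 1:] slice leaves 2x2.

import torch

y = AvgPoolPad()(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 2, 2)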
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) 
tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * ( 0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + ( -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + x4, tmp93, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class AvgPoolPadNew(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
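A parity-check sketch for this record, assuming CUDA and both classes in scope; AvgPoolPadNew routes through the fused kernels and should agree with the eager AvgPoolPad up to float rounding.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = AvgPoolPad()(x)
out = AvgPoolPadNew()(x)
assert out.shape == ref.shape == (4, 4, 2, 2)
assert torch.allclose(out, ref, atol=1e-6)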
DRACOyu/deep-person-reid
AvgPoolPad
false
5199
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
import torch import torch.nn as nn from torchvision.transforms import * class Model(nn.Module): def __init__(self, stride=2, padding=1): super().__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
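For intuition on count_include_pad=False in the model above: where the pool window overlaps the pool's own zero padding, the divisor counts only in-bounds cells. A toy comparison on an all-ones input:

import torch
import torch.nn as nn

x = torch.ones(1, 1, 5, 5)
excl = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)(x)
incl = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=True)(x)
assert excl[0, 0, 0, 0].item() == 1.0               # 4 ones / 4 counted cells
assert abs(incl[0, 0, 0, 0].item() - 4 / 9) < 1e-6  # 4 ones / full 3x3 window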
EmbeddingModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gx/cgxraajlq2wp5cxw5sfa7jlhdy6cs5ctq4gcigpy4z6jg33sjblu.py # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # x_2 => relu_2 # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(in_out_ptr1 + (x2), tmp9, xmask) tl.store(out_ptr1 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xu/cxuokj3dvl4qdhy5uebutpnhdphnu4roqpfcjq7w7rsg4iwxl32x.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_4 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_3], 2), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((32*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 64, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + 
((32*x1) + ((-32) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + ((-32) + x0), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_9, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_9, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # 
Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/b5/cb5fa7yeighdghdh7bpl3qbtzerhee5qcv2q6lmyrgoofjeovin5.py # Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu_1 # x_3 => relu_3 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 64), (64, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) buf3 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf3) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 32), (128, 32, 1), 0); del buf0 # reuse buf13 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) buf4 = reinterpret_tensor(buf3, (4, 4, 32), (128, 32, 1), 0); del buf3 # reuse buf11 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf4, primals_2, buf13, buf11, 512, grid=grid(512), stream=stream0) del 
primals_2 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf5 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf4, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf2, primals_5, buf5, buf6, 1024, grid=grid(1024), stream=stream0) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf6, (16, 64), (64, 1), 0), reinterpret_tensor(primals_7, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_8 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf8, buf9, 64, grid=grid(64), stream=stream0) del buf8 buf10 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) buf12 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_4.run(buf5, primals_5, buf2, buf10, buf12, 512, grid=grid(512), stream=stream0) del buf2 del buf5 del primals_5 return (buf9, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 32), (32, 1), 0), reinterpret_tensor(buf6, (16, 64), (64, 1), 0), buf9, primals_7, buf10, primals_4, buf11, buf12, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch import nn
from torch import optim


class EmbeddingModel(nn.Module):

    def __init__(self, obs_size, num_outputs):
        super(EmbeddingModel, self).__init__()
        self.obs_size = obs_size
        self.num_outputs = num_outputs
        self.fc1 = nn.Linear(obs_size, 32)
        self.fc2 = nn.Linear(32, 32)
        self.last = nn.Linear(32 * 2, num_outputs)
        self.optimizer = optim.Adam(self.parameters(), lr=1e-05)

    def forward(self, x1, x2):
        x1 = self.embedding(x1)
        x2 = self.embedding(x2)
        x = torch.cat([x1, x2], dim=2)
        x = self.last(x)
        return nn.Softmax(dim=2)(x)

    def embedding(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x

    def train_model(self, batch):
        # NOTE: `config` is an external dependency of the source repo (it
        # supplies `config.sequence_length`); it is not defined in this
        # snippet, so train_model is not runnable standalone.
        batch_size = torch.stack(batch.state).size()[0]
        states = torch.stack(batch.state).view(
            batch_size, config.sequence_length, self.obs_size)[:, -5:, :]
        next_states = torch.stack(batch.next_state).view(
            batch_size, config.sequence_length, self.obs_size)[:, -5:, :]
        actions = torch.stack(batch.action).view(
            batch_size, config.sequence_length, -1).long()[:, -5:, :]
        self.optimizer.zero_grad()
        net_out = self.forward(states, next_states)
        actions_one_hot = torch.squeeze(
            F.one_hot(actions, self.num_outputs)).float()
        loss = nn.MSELoss()(net_out, actions_one_hot)
        loss.backward()
        self.optimizer.step()
        return loss.item()


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'obs_size': 4, 'num_outputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch import nn from torch import optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(in_out_ptr1 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp15 = tl.load(in_ptr2 + (32 * x1 + (-32 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (-32 + x0), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp10 = tmp9 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 64), (64, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) buf3 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf3) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 32), (128, 32, 1), 0) del buf0 buf13 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) buf4 = reinterpret_tensor(buf3, (4, 4, 32), (128, 32, 1), 0) del buf3 buf11 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, buf4, primals_2, buf13, buf11, 512, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_2 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf5 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(1024)](buf2, primals_5, buf5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf6, (16, 64), (64, 1), 0), reinterpret_tensor(primals_7, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_8 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf7, buf8, 64, XBLOCK=64, 
num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0) del buf7 triton_poi_fused__softmax_3[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 buf10 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) buf12 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(512)](buf5, primals_5, buf2, buf10, buf12, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf5 del primals_5 return buf9, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor( primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 32), (32, 1), 0), reinterpret_tensor(buf6, (16, 64), (64, 1), 0 ), buf9, primals_7, buf10, primals_4, buf11, buf12, buf13 class EmbeddingModelNew(nn.Module): def __init__(self, obs_size, num_outputs): super(EmbeddingModelNew, self).__init__() self.obs_size = obs_size self.num_outputs = num_outputs self.fc1 = nn.Linear(obs_size, 32) self.fc2 = nn.Linear(32, 32) self.last = nn.Linear(32 * 2, num_outputs) self.optimizer = optim.Adam(self.parameters(), lr=1e-05) def embedding(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return x def train_model(self, batch): batch_size = torch.stack(batch.state).size()[0] states = torch.stack(batch.state).view(batch_size, config. sequence_length, self.obs_size)[:, -5:, :] next_states = torch.stack(batch.next_state).view(batch_size, config .sequence_length, self.obs_size)[:, -5:, :] actions = torch.stack(batch.action).view(batch_size, config. sequence_length, -1).long()[:, -5:, :] self.optimizer.zero_grad() net_out = self.forward(states, next_states) actions_one_hot = torch.squeeze(F.one_hot(actions, self.num_outputs) ).float() loss = nn.MSELoss()(net_out, actions_one_hot) loss.backward() self.optimizer.step() return loss.item() def forward(self, input_0, input_1): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_7 = self.last.weight primals_8 = self.last.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Deepest-Project/agent57_from_ngu
EmbeddingModel
false
5,200
[ "MIT" ]
1
2f596024c7538cfaa5cf63cde1b77f8a1c22d208
https://github.com/Deepest-Project/agent57_from_ngu/tree/2f596024c7538cfaa5cf63cde1b77f8a1c22d208
import torch
import torch.nn.functional as F
from torch import nn
from torch import optim


class Model(nn.Module):

    def __init__(self, obs_size, num_outputs):
        super().__init__()
        self.obs_size = obs_size
        self.num_outputs = num_outputs
        self.fc1 = nn.Linear(obs_size, 32)
        self.fc2 = nn.Linear(32, 32)
        self.last = nn.Linear(32 * 2, num_outputs)
        self.optimizer = optim.Adam(self.parameters(), lr=1e-05)

    def forward(self, x1, x2):
        x1 = self.embedding(x1)
        x2 = self.embedding(x2)
        x = torch.cat([x1, x2], dim=2)
        x = self.last(x)
        return nn.Softmax(dim=2)(x)

    def embedding(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x

    def train_model(self, batch):
        # NOTE: `config` is an external dependency of the source repo
        # (it supplies `config.sequence_length`) and is not defined here.
        batch_size = torch.stack(batch.state).size()[0]
        states = torch.stack(batch.state).view(
            batch_size, config.sequence_length, self.obs_size)[:, -5:, :]
        next_states = torch.stack(batch.next_state).view(
            batch_size, config.sequence_length, self.obs_size)[:, -5:, :]
        actions = torch.stack(batch.action).view(
            batch_size, config.sequence_length, -1).long()[:, -5:, :]
        self.optimizer.zero_grad()
        net_out = self.forward(states, next_states)
        actions_one_hot = torch.squeeze(
            F.one_hot(actions, self.num_outputs)).float()
        loss = nn.MSELoss()(net_out, actions_one_hot)
        loss.backward()
        self.optimizer.step()
        return loss.item()


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4, 4]
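A minimal usage sketch for the reference model above, assuming the `Model`, `get_inputs`, and `get_init_inputs` names from this record are in scope (CPU is enough here; only the compiled Triton path requires CUDA):

import torch

model = Model(*get_init_inputs())      # obs_size=4, num_outputs=4
x1, x2 = get_inputs()                  # two [4, 4, 4] observation batches
out = model(x1, x2)                    # embeddings are concatenated, then softmaxed over dim=2
assert out.shape == (4, 4, 4)
# each row of the final dimension is a probability distribution
assert torch.allclose(out.sum(dim=2), torch.ones(4, 4), atol=1e-06)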
_ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/4q/c4qoh645afcunrhaa5xye6sbkw2mzzlvmntdpffld4732bbjzx7o.py # Topologically Sorted Source Nodes: [dimension, attn_2], Original ATen: [aten.sqrt, aten._softmax] # Source node to ATen node mapping: # attn_2 => exp # dimension => full_default # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {}) # %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False}) # %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %where_self), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_sqrt_0 = async_compile.triton('triton_poi_fused__softmax_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dimension, attn_2], Original ATen: [aten.sqrt, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_sqrt_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class _ScaledDotProductAttention(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super().__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.permute(0, 2, 1)) if self.scale: dimension = torch.sqrt(torch.tensor(k.shape[-1])) attn = attn / dimension if mask is not None: attn = attn.masked_fill(mask, -1000000000.0) attn = self.softmax(attn) if self.dropout is not None: attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class _ScaledDotProductAttentionNew(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super().__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return 
output[0], output[1]
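The fused kernel `triton_poi_fused__softmax_sqrt_0` above folds the `1/sqrt(d)` scaling into a numerically stable softmax by factoring out the scale's sign: with `s = 1 if scale >= 0 else -1`, it computes `exp((x*s - max(x*s)) / (s*scale))`, which for positive `scale` equals the usual `exp((x - max(x)) / scale)`. A small check of that identity using only stock PyTorch (the `scale = 2.0` constant mirrors the value hard-coded in the kernel for d = 4):

import torch

x = torch.randn(4, 4, 4)
scale = 2.0                            # sqrt(d) with d = 4
s = 1.0 if scale >= 0 else -1.0
t = x * s
num = torch.exp((t - t.amax(dim=2, keepdim=True)) / (s * scale))
# normalizing `num` (the second kernel's job) recovers softmax(x / scale)
ref = torch.softmax(x / scale, dim=2)
assert torch.allclose(num / num.sum(dim=2, keepdim=True), ref, atol=1e-06)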
Gian-Wiher/darts
_ScaledDotProductAttention
false
5,201
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super().__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.permute(0, 2, 1)) if self.scale: dimension = torch.sqrt(torch.tensor(k.shape[-1])) attn = attn / dimension if mask is not None: attn = attn.masked_fill(mask, -1000000000.0) attn = self.softmax(attn) if self.dropout is not None: attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return []
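As a quick numerical check, the attention module above should match a plain PyTorch formulation of `softmax(q @ k^T / sqrt(d), dim=2) @ v`; a sketch assuming this record's `Model` and `get_inputs` helpers are in scope (CPU only):

import torch

q, k, v = get_inputs()                 # each [4, 4, 4], so d = 4 and sqrt(d) = 2
attn_mod = Model()                     # scale=True, dropout=None
out, attn = attn_mod(q, k, v)

scores = torch.bmm(q, k.transpose(1, 2)) / torch.sqrt(torch.tensor(4.0))
ref = torch.bmm(torch.softmax(scores, dim=2), v)
assert torch.allclose(out, ref, atol=1e-06)
assert torch.allclose(attn.sum(dim=2), torch.ones(4, 4), atol=1e-06)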
Fire
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/x2/cx2xvnzmnt63cxmn2w5numnmu3nfvs3w44zae4ys3rvoczt66fyt.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 8, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp12 & xmask, other=0.0) tmp16 = tl.load(in_ptr3 + ((-4) + x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + (x3), tmp21, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/35/c35vxy5oullorpkglb324ij376h6o5mbjsqjoykh7yiskiyinasn.py # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = 
extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf2, primals_5, buf3, primals_7, buf4, 512, grid=grid(512), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf3, primals_7, buf5, 256, grid=grid(256), stream=stream0) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf2, primals_5, buf6, 256, grid=grid(256), stream=stream0) del buf2 del primals_5 return (buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torchvision.transforms import * class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x))], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4, 'expand3x3_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp12 & xmask, other=0.0) tmp16 = tl.load(in_ptr3 + (-4 + x1), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 
256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf3, primals_7, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf2, primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del primals_5 return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6 class FireNew(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(FireNew, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.squeeze.weight primals_2 = self.squeeze.bias primals_4 = self.expand1x1.weight primals_5 = self.expand1x1.bias primals_6 = self.expand3x3.weight primals_7 = self.expand3x3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
DRACOyu/deep-person-reid
Fire
false
5,202
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
import torch import torch.nn as nn from torchvision.transforms import * class Model(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super().__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x))], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4, 'expand3x3_planes': 4}]
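A shape-level sketch of the Fire block above: the squeeze 1x1 conv reduces to 4 channels, and the two expansion branches are concatenated along dim=1, so the 4 input channels become 4 + 4 = 8 output channels at the same spatial size (assumes this record's `Model` and helper functions are in scope):

import torch

args, kwargs = get_init_inputs()       # ([], {'inplanes': 4, 'squeeze_planes': 4, ...})
fire = Model(*args, **kwargs)
x, = get_inputs()                      # a [4, 4, 4, 4] input batch
out = fire(x)
# expand1x1 (4 ch) and expand3x3 (4 ch, padding=1) concatenated -> 8 channels
assert out.shape == (4, 8, 4, 4)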
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/on/conl6eemb3vyjzkllydlouehrcxphkzifo5kmslz6fgiz6ixsw5h.py # Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_2 => mul_2 # weight => mul_3 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/go/cgoav6av4bzem4wmdmkiowlmjpeiubwc67bqu6es4aivwlfpxzhh.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] # Source node to ATen node mapping: # out_3 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, 
)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf5, primals_6, 192, grid=grid(192), stream=stream0) del primals_6 return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. 
in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
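Note on the ToRGB record's python_code and triton_code fields: they reference fused and upfirdn2d_op without ever defining them, and torchvision.transforms.functional is imported as F only to be shadowed immediately by torch.nn.functional, so every F.* call resolves to the latter. In the upstream StyleGAN2 port those two names are JIT-compiled CUDA extensions; the snippet below is a hedged sketch of how they are typically provided (the source paths are placeholders, not taken from this record):
from torch.utils.cpp_extension import load

# Hypothetical layout: the upstream repo keeps these C++/CUDA sources under an op/ directory.
fused = load('fused', sources=['op/fused_bias_act.cpp', 'op/fused_bias_act_kernel.cu'])
upfirdn2d_op = load('upfirdn2d', sources=['op/upfirdn2d.cpp', 'op/upfirdn2d_kernel.cu'])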
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, 
down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( 
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
CTPLab/IID_representation_learning
ToRGB
false
5203
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 # ... truncated (>4000 chars) for memory efficiency
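For readers tracing the ToRGB record above, here is a minimal eager-mode sketch of the arithmetic its four fused kernels implement: triton_poi_fused_mul_0 and triton_poi_fused_mul_1 apply the EqualLinear scale (1/sqrt(style_dim) = 0.5) and lr_mul (1.0) before the addmm, triton_poi_fused_mul_2 applies the ModulatedConv2d scale (1/sqrt(in_channel) = 0.5) and the per-sample style, and triton_poi_fused_add_3 adds the bias after the grouped convolution. Shapes follow get_inputs()/get_init_inputs(); the weights are random stand-ins, so this is a shape-and-formula sketch rather than a parity test against the compiled call.
import math
import torch
import torch.nn.functional as F

batch, in_ch, out_ch, style_dim, hw = 4, 4, 3, 4, 4
x = torch.rand(batch, in_ch, hw, hw)
style = torch.rand(batch, style_dim)

mod_w = torch.randn(in_ch, style_dim)         # EqualLinear weight of the modulation layer
mod_b = torch.ones(in_ch)                     # bias_init=1
weight = torch.randn(1, out_ch, in_ch, 1, 1)  # ModulatedConv2d weight, 1x1 kernel
bias = torch.zeros(1, out_ch, 1, 1)

# mul_0 / mul_1 + addmm: style = modulation(style), scale = 1/sqrt(style_dim), lr_mul = 1
s = F.linear(style, mod_w * (1.0 / math.sqrt(style_dim)), mod_b).view(batch, 1, in_ch, 1, 1)

# mul_2: weight = (1/sqrt(fan_in)) * weight * style; demodulate=False, so no rsqrt step
w = ((1.0 / math.sqrt(in_ch)) * weight * s).view(batch * out_ch, in_ch, 1, 1)

# Grouped 1x1 conv over the flattened batch, then the bias add from add_3
out = F.conv2d(x.view(1, batch * in_ch, hw, hw), w, groups=batch)
out = out.view(batch, out_ch, hw, hw) + bias
print(out.shape)  # torch.Size([4, 3, 4, 4]), matching buf5 in the compiled call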
MaxPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jf/cjf7zenaxtvwhbfrvvghsyyrrhxyrlvtj5rotfw7n2nqtvscv3l7.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float("-inf"), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float("-inf"), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float("-inf"), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float("-inf"), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float("-inf"), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float("-inf"), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float("-inf"), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float("-inf"), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 
tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float("-inf"), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + (x4), tmp91, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4b/c4bq4zy7b5su4xfunkxh3zumzbbjajtzrtchq4xnyu5u7efma7by.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 2 x2 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (3*x1) + (9*x2)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 
1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torchvision.transforms import * class MaxPoolPad(nn.Module): def __init__(self): super(MaxPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, 
tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + x4, tmp91, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)]( arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class MaxPoolPadNew(nn.Module): def __init__(self): super(MaxPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
DRACOyu/deep-person-reid
MaxPoolPad
false
5204
[ "MIT" ]
1
8ca8be28c204dbc37cff76e77691f29045773aa2
https://github.com/DRACOyu/deep-person-reid/tree/8ca8be28c204dbc37cff76e77691f29045773aa2
import torch import torch.nn as nn from torchvision.transforms import * class Model(nn.Module): def __init__(self): super().__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
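A quick eager-mode trace of the shapes in the MaxPoolPad record, assuming only stock PyTorch on CPU: the first fused kernel folds nn.ZeroPad2d((1, 0, 1, 0)) and the 3x3/stride-2/padding-1 max pool into one pass (4x4 input -> 5x5 padded -> 3x3 pooled map, hence the kernel's bound checks against 5 and the 144-element buf0), and the second kernel materializes the [:, :, 1:, 1:] slice contiguously, which is why it loads at offset 4 + x0 + 3*x1 + 9*x2, i.e. element [1 + x1, 1 + x0] of each 3x3 map.
import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
padded = nn.ZeroPad2d((1, 0, 1, 0))(x)                 # (4, 4, 5, 5)
pooled = nn.MaxPool2d(3, stride=2, padding=1)(padded)  # (4, 4, 3, 3), the shape of buf0
out = pooled[:, :, 1:, 1:].contiguous()                # (4, 4, 2, 2), the shape of buf1
print(out.shape)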
_GatedLinearUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fg/cfgseyxhpu7b6i4xtsiblktsjg6wbg2mlcsyld7lmr4dhbh7u4xc.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.glu] # Source node to ATen node mapping: # x_1 => glu # Graph fragment: # %glu : [num_users=1] = call_function[target=torch.ops.aten.glu.default](args = (%view_1,), kwargs = {}) triton_poi_fused_glu_0 = async_compile.triton('triton_poi_fused_glu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.glu] stream0 = get_raw_stream(0) triton_poi_fused_glu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_glu_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) class _GatedLinearUnitNew(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
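For reference, the fused kernel above implements F.glu over the last dimension of the (64, 8) addmm output: the loads at x0 + 8*x1 read the value half and those at 4 + x0 + 8*x1 read the gate half. A minimal eager-mode sketch (an illustration added here, not part of the dataset record) reproducing that indexing:

import torch
import torch.nn.functional as F

x = torch.randn(64, 8)                    # shape of buf0, the addmm output
value, gate = x[:, :4], x[:, 4:]          # loads at x0 + 8*x1 and 4 + x0 + 8*x1
manual = value * torch.sigmoid(gate)      # tmp3 = tmp0 * tl.sigmoid(tmp1)
assert torch.allclose(manual, F.glu(x, dim=-1))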
Gian-Wiher/darts
_GatedLinearUnit
false
5205
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = nn.Dropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
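A quick smoke test built from the record's get_init_inputs()/get_inputs(), assuming the Model class above is in scope; the Linear maps 4 -> 8 features and GLU halves them back:

import torch

model = Model(4)                     # input_size=4, hidden defaults to 4 -> fc: Linear(4, 8)
y = model(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)       # GLU over dim=-1 restores the feature size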
_AddNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2z/c2zkvxx2bap6rkrvu5u2ypibpze2q2ydbjc6qyfqciinjgpzs57e.py # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mul => mul # output => var_mean # sigmoid => sigmoid # skip => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (1)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp18 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (2)) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (3)) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp1 * tmp4 tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tmp0 + tmp7 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp10 * tmp13 tmp15 = tmp14 * tmp6 tmp16 = tmp9 + tmp15 tmp17 = tmp8 + tmp16 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp19 * tmp22 tmp24 = tmp23 * tmp6 tmp25 = tmp18 + tmp24 tmp26 = tmp17 + tmp25 tmp31 = tl.sigmoid(tmp30) tmp32 = tmp28 * tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp27 + tmp33 tmp35 = tmp26 + tmp34 tmp36 = 4.0 tmp37 = tmp35 / tmp36 tmp38 = tmp8 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp16 - tmp37 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp25 - tmp37 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp34 - tmp37 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp48 / tmp36 tl.store(out_ptr0 + (x0), tmp37, xmask) tl.store(out_ptr1 + (x0), tmp49, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sj/csj2kjzhnbru45opn7vpbebkenbpwwfmi66xlyyx5mjua6j72k6i.py # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mul => mul # output => add_1, add_2, mul_2, mul_3, rsqrt, sub # sigmoid => sigmoid # skip => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_5), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp9 = tmp7 - tmp8 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp9 * tmp13 tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = 
empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0.run(primals_3, primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, skip, add, output], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(primals_3, primals_2, primals_1, buf0, buf1, primals_4, primals_5, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_5 return (buf2, primals_1, primals_2, primals_3, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr2 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp18 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + 2) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr2 + 3) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp1 * tmp4 tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tmp0 + tmp7 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp10 * tmp13 tmp15 = tmp14 * tmp6 tmp16 = tmp9 + tmp15 tmp17 = tmp8 + tmp16 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp19 * tmp22 tmp24 = tmp23 * tmp6 tmp25 = tmp18 + tmp24 tmp26 = tmp17 + tmp25 tmp31 = tl.sigmoid(tmp30) tmp32 = tmp28 * tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp27 + tmp33 tmp35 = tmp26 + tmp34 tmp36 = 4.0 tmp37 = tmp35 / tmp36 tmp38 = tmp8 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp16 - tmp37 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp25 - tmp37 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp34 - tmp37 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp48 / tmp36 tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp49, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp9 = tmp7 - tmp8 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp9 * tmp13 tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) 
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0[grid(64)]( primals_3, primals_2, primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(256)]( primals_3, primals_2, primals_1, buf0, buf1, primals_4, primals_5, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_5 return buf2, primals_1, primals_2, primals_3, primals_4 class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _AddNormNew(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, input_0, input_1): primals_1 = self.mask primals_4 = self.norm.weight primals_5 = self.norm.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
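The two kernels in this record split a fused layer norm: kernel 0 reduces each length-4 row of x + skip * sigmoid(mask) * 2.0 to its mean and biased variance (correction=0), and kernel 1 applies the affine normalization. An eager-mode sketch of the same decomposition (an added illustration, not part of the record):

import torch
import torch.nn.functional as F

x, skip = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
mask = torch.zeros(4)
gated = x + skip * torch.sigmoid(mask) * 2.0            # the add fused into both kernels
mean = gated.mean(-1, keepdim=True)                     # kernel 0, out_ptr0
var = gated.var(-1, unbiased=False, keepdim=True)       # kernel 0, out_ptr1 (correction=0)
normed = (gated - mean) * torch.rsqrt(var + 1e-05)      # kernel 1 body (weight=1, bias=0 here)
assert torch.allclose(normed, F.layer_norm(gated, (4,)), atol=1e-6)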
Gian-Wiher/darts
_AddNorm
false
5206
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class Model(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
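Worth noting: with mask initialized to zeros, the trainable gate starts as the identity, since sigmoid(0) * 2.0 = 1.0, so at initialization _AddNorm reduces to LayerNorm(x + skip). A one-line check:

import torch
assert torch.allclose(torch.sigmoid(torch.zeros(4)) * 2.0, torch.ones(4))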
EqualLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_2, buf1, 4, grid=grid(4), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim, bias=True, 
bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
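The two small kernels here just bake in the equalized-learning-rate constants: scale = 1 / sqrt(in_dim) * lr_mul = 0.5 for the weight and lr_mul = 1.0 for the bias, after which the addmm does the actual linear map. An eager sketch of the same computation (added illustration; all names are local to the sketch):

import math
import torch
import torch.nn.functional as F

in_dim, out_dim, lr_mul = 4, 4, 1
weight = torch.randn(out_dim, in_dim) / lr_mul
bias = torch.zeros(out_dim)
scale = 1 / math.sqrt(in_dim) * lr_mul              # 0.5, the constant in kernel 0
x = torch.rand(64, in_dim)                          # the flattened (4, 4, 4, 4) input
out = F.linear(x, weight * scale, bias * lr_mul)    # equivalent to the fused addmm path
assert out.shape == (64, out_dim)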
CTPLab/IID_representation_learning
EqualLinear
false
5207
[ "MIT" ]
1
b9dc13536963f9af332b039f7cc772e2f1090c62
https://github.com/CTPLab/IID_representation_learning/tree/b9dc13536963f9af332b039f7cc772e2f1090c62
from torch.autograd import Function import math import torch import torchvision.transforms.functional as F from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class Model(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
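Two caveats when reading this record's Python source: `fused` refers to an external compiled CUDA extension (used only when an activation is set) that is not defined in the snippet, and the torchvision functional import is immediately shadowed by the `torch.nn.functional` import that follows it. The default activation=None path avoids both, so a CPU smoke test (assuming the Model class above is in scope) runs as-is:

import torch

model = Model(4, 4)                  # in_dim=4, out_dim=4, activation=None
y = model(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)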
_ResampleNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/kd/ckdxxgoclliz5lzakuv2sbeywai7mrb5oztwl6f5ipzd4hqe42pt.py # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] # Source node to ATen node mapping: # mul => mul # output => var_mean # sigmoid => sigmoid # x => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mul_1, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_mul_native_layer_norm_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp21 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp10 = tl.sigmoid(tmp9) tmp11 = tmp7 * tmp10 tmp12 = tmp11 * tmp5 tmp13 = tmp6 + tmp12 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp14 * tmp17 tmp19 = tmp18 * tmp5 tmp20 = tmp13 + tmp19 tmp24 = tl.sigmoid(tmp23) tmp25 = tmp21 * tmp24 tmp26 = tmp25 * tmp5 tmp27 = tmp20 + tmp26 tmp28 = 4.0 tmp29 = tmp27 / tmp28 tmp30 = tmp6 - tmp29 tmp31 = tmp30 * tmp30 tmp32 = tmp12 - tmp29 tmp33 = tmp32 * tmp32 tmp34 = tmp31 + tmp33 tmp35 = tmp19 - tmp29 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp38 = tmp26 - tmp29 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp40 / tmp28 tl.store(out_ptr0 + (x0), tmp29, xmask) tl.store(out_ptr1 + (x0), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xc/cxcpfundfbswf2vcixxfx2augodmjtu2z5yzl767pujn6nzsbm4e.py # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] # Source node to ATen node mapping: # mul => mul # output => add, add_1, mul_2, mul_3, rsqrt, sub # sigmoid => sigmoid # x => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %getitem_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_4), kwargs = {}) triton_poi_fused_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_mul_native_layer_norm_sigmoid_0.run(primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, x, output], Original ATen: [aten.sigmoid, aten.mul, aten.native_layer_norm] triton_poi_fused_mul_native_layer_norm_sigmoid_1.run(primals_2, primals_1, buf0, buf1, primals_3, primals_4, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_4 return (buf2, primals_1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 
= rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1,
        out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp21 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp3 = tl.sigmoid(tmp2)
    tmp4 = tmp0 * tmp3
    tmp5 = 2.0
    tmp6 = tmp4 * tmp5
    tmp10 = tl.sigmoid(tmp9)
    tmp11 = tmp7 * tmp10
    tmp12 = tmp11 * tmp5
    tmp13 = tmp6 + tmp12
    tmp17 = tl.sigmoid(tmp16)
    tmp18 = tmp14 * tmp17
    tmp19 = tmp18 * tmp5
    tmp20 = tmp13 + tmp19
    tmp24 = tl.sigmoid(tmp23)
    tmp25 = tmp21 * tmp24
    tmp26 = tmp25 * tmp5
    tmp27 = tmp20 + tmp26
    tmp28 = 4.0
    tmp29 = tmp27 / tmp28
    tmp30 = tmp6 - tmp29
    tmp31 = tmp30 * tmp30
    tmp32 = tmp12 - tmp29
    tmp33 = tmp32 * tmp32
    tmp34 = tmp31 + tmp33
    tmp35 = tmp19 - tmp29
    tmp36 = tmp35 * tmp35
    tmp37 = tmp34 + tmp36
    tmp38 = tmp26 - tmp29
    tmp39 = tmp38 * tmp38
    tmp40 = tmp37 + tmp39
    tmp41 = tmp40 / tmp28
    tl.store(out_ptr0 + x0, tmp29, xmask)
    tl.store(out_ptr1 + x0, tmp41, xmask)


@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
        in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tmp4 = 2.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.rsqrt(tmp10)
    tmp12 = tmp7 * tmp11
    tmp14 = tmp12 * tmp13
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_native_layer_norm_sigmoid_0[grid(64)](primals_2,
            primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_native_layer_norm_sigmoid_1[grid(256)](primals_2,
            primals_1, buf0, buf1, primals_3, primals_4, buf2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_4
    return buf2, primals_1, primals_2, primals_3


class _TimeDistributedInterpolation(nn.Module):

    def __init__(self, output_size: 'int', batch_first: 'bool'=False,
                 trainable: 'bool'=False):
        super().__init__()
        self.output_size = output_size
        self.batch_first = batch_first
        self.trainable = trainable
        if self.trainable:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float32))
            self.gate = nn.Sigmoid()

    def interpolate(self, x):
        upsampled = F.interpolate(x.unsqueeze(1), self.output_size,
                                  mode='linear', align_corners=True).squeeze(1)
        if self.trainable:
            upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
        return upsampled

    def forward(self, x):
        if len(x.size()) <= 2:
            return self.interpolate(x)
        x_reshape = x.contiguous().view(-1, x.size(-1))
        y = self.interpolate(x_reshape)
        if self.batch_first:
            y = y.contiguous().view(x.size(0), -1, y.size(-1))
        else:
            y = y.view(-1, x.size(1), y.size(-1))
        return y


class _ResampleNormNew(nn.Module):

    def __init__(self, input_size: 'int', output_size: 'int'=None,
                 trainable_add: 'bool'=True):
        super().__init__()
        self.input_size = input_size
        self.trainable_add = trainable_add
        self.output_size = output_size or input_size
        if self.input_size != self.output_size:
            self.resample = _TimeDistributedInterpolation(self.output_size,
                batch_first=True, trainable=False)
        if self.trainable_add:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float))
            self.gate = nn.Sigmoid()
        self.norm = nn.LayerNorm(self.output_size)

    def forward(self, input_0):
        primals_1 = self.mask
        primals_3 = self.norm.weight
        primals_4 = self.norm.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
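The generated code splits the fused sigmoid-gate + LayerNorm into two passes: kernel 0 walks each row of 4 elements, recomputes x * sigmoid(mask) * 2.0, and stores the per-row mean and biased variance; kernel 1 then recomputes the gated value elementwise and applies (v - mean) * rsqrt(var + 1e-5) * weight + bias. A hypothetical parity check between the eager and compiled modules (a sketch, not part of the original record; it assumes a CUDA device and that both class definitions from this record are in scope):

import torch

torch.manual_seed(0)
eager = _ResampleNorm(input_size=4).cuda()
compiled = _ResampleNormNew(input_size=4).cuda()
compiled.load_state_dict(eager.state_dict())  # same mask and LayerNorm affine params

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)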
Gian-Wiher/darts
_ResampleNorm
false
5208
[ "Apache-2.0" ]
1
0d267e08643e2e3f88163a5d955b8be75840c2f6
https://github.com/Gian-Wiher/darts/tree/0d267e08643e2e3f88163a5d955b8be75840c2f6
import torch
import torch.nn as nn
import torch.nn.functional as F


class _TimeDistributedInterpolation(nn.Module):

    def __init__(self, output_size: 'int', batch_first: 'bool'=False,
                 trainable: 'bool'=False):
        super().__init__()
        self.output_size = output_size
        self.batch_first = batch_first
        self.trainable = trainable
        if self.trainable:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float32))
            self.gate = nn.Sigmoid()

    def interpolate(self, x):
        upsampled = F.interpolate(x.unsqueeze(1), self.output_size,
                                  mode='linear', align_corners=True).squeeze(1)
        if self.trainable:
            upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
        return upsampled

    def forward(self, x):
        if len(x.size()) <= 2:
            return self.interpolate(x)
        x_reshape = x.contiguous().view(-1, x.size(-1))
        y = self.interpolate(x_reshape)
        if self.batch_first:
            y = y.contiguous().view(x.size(0), -1, y.size(-1))
        else:
            y = y.view(-1, x.size(1), y.size(-1))
        return y


class Model(nn.Module):

    def __init__(self, input_size: 'int', output_size: 'int'=None,
                 trainable_add: 'bool'=True):
        super().__init__()
        self.input_size = input_size
        self.trainable_add = trainable_add
        self.output_size = output_size or input_size
        if self.input_size != self.output_size:
            self.resample = _TimeDistributedInterpolation(self.output_size,
                batch_first=True, trainable=False)
        if self.trainable_add:
            self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=torch.float))
            self.gate = nn.Sigmoid()
        self.norm = nn.LayerNorm(self.output_size)

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        if self.input_size != self.output_size:
            x = self.resample(x)
        if self.trainable_add:
            x = x * self.gate(self.mask) * 2.0
        output = self.norm(x)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
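The record's harness fields can drive a quick instantiation test; note that get_init_inputs here returns positional arguments ([4]), unlike the ([], {'input_size': 4}) args/kwargs form used by the python_code field above. A sketch, assuming only the definitions in this field:

model = Model(*get_init_inputs())   # equivalent to Model(4)
out = model(*get_inputs())
print(out.shape)  # torch.Size([4, 4, 4, 4])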
TransformerDecoderLayer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   contiguous => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = (yindex // 4)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/vh/cvhiot6d7wcfsuy6rh4gl4pcavsvp2otvw6mzmz3nqtaqwsotole.py
# Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
#   repeat => repeat
# Graph fragment:
#   %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%view_12, [1, 4, 1, 1]), kwargs = {})
triton_poi_fused_repeat_1 = async_compile.triton('triton_poi_fused_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x3 = xindex
    tmp0 = x0 + ((-1)*x1)
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 <= tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 == tmp4
    tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/37/c37im5gvzgd54i6pwdkzbzagjgm2hlvyuufc537slxun4obyhoes.py
# Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
#   attn_weights => exp, sum_1
#   masked_fill_ => full_default_2, where_1
# Graph fragment:
#   %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default_2, %bmm), kwargs = {})
#   %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, 1), kwargs = {})
#   %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [-1], True), kwargs = {})
#   %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
#   %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1.0), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = -1.0000000200408773e+20
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp8 = tl.where(tmp6, tmp2, tmp7)
    tmp9 = tmp8 * tmp4
    tmp10 = triton_helpers.maximum(tmp5, tmp9)
    tmp13 = tl.where(tmp11, tmp2, tmp12)
    tmp14 = tmp13 * tmp4
    tmp15 = triton_helpers.maximum(tmp10, tmp14)
    tmp18 = tl.where(tmp16, tmp2, tmp17)
    tmp19 = tmp18 * tmp4
    tmp20 = triton_helpers.maximum(tmp15, tmp19)
    tmp21 = tmp5 - tmp20
    tmp22 = tmp21 * tmp4
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp9 - tmp20
    tmp25 = tmp24 * tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp23 + tmp26
    tmp28 = tmp14 - tmp20
    tmp29 = tmp28 * tmp4
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp27 + tmp30
    tmp32 = tmp19 - tmp20
    tmp33 = tmp32 * tmp4
    tmp34 = tl_math.exp(tmp33)
    tmp35 = tmp31 + tmp34
    tl.store(out_ptr0 + (x0), tmp20, xmask)
    tl.store(out_ptr1 + (x0), tmp35, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/s7/cs7tkqbp32zttfoqu3noo4dzger63apkogo36sj6lkru33bw54tm.py
# Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
#   attn_weights => div_1, exp
#   masked_fill_ => full_default_2, where_1
# Graph fragment:
#   %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_13, %full_default_2, %bmm), kwargs = {})
#   %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, 1), kwargs = {})
#   %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [-1], True), kwargs = {})
#   %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
#   %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1.0), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
#   %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
    tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp6 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = -1.0000000200408773e+20
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp8 = tmp7 * tmp4
    tmp9 = tl_math.exp(tmp8)
    tmp11 = tmp9 / tmp10
    tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   contiguous_3 => clone_4
# Graph fragment:
#   %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = (yindex // 4)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/s7/cs7p2dyxlesdvuyx4owztmqg5sapsarlgzaivin7okeoe6lxygw7.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   layer_norm => var_mean
# Graph fragment:
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_18, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + (x0), tmp16, xmask)
    tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/y6/cy6mkjdwes62jaih4dzebyknvxezhquh37cme5cflrxbxff3z675.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub_2
# Graph fragment:
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_18, %getitem_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_10), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_11), kwargs = {})
triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x2), xmask)
    tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/36/c367rcv2obfrr3jnd6iifol46chcr6me3cdxpbe7jwum3jppqkna.py
# Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
#   repeat_1 => repeat_1
# Graph fragment:
#   %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%view_32, [1, 4, 1, 1]), kwargs = {})
triton_poi_fused_repeat_7 = async_compile.triton('triton_poi_fused_repeat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = (xindex // 16)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/6h/c6hd5ncxoecwkbb7fby7ebnhqlwhh4mzmufikxyk5aouosneh42d.py
# Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_2], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
#   attn_weights_2 => exp_1, sum_2
#   masked_fill_ => full_default_2
#   masked_fill__1 => where_2
# Graph fragment:
#   %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_33, %full_default_2, %bmm_2), kwargs = {})
#   %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 1), kwargs = {})
#   %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
#   %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
#   %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
#   %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
triton_poi_fused__softmax_masked_fill_8 = async_compile.triton('triton_poi_fused__softmax_masked_fill_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp2 = -1.0000000200408773e+20
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp8 = tl.where(tmp6, tmp2, tmp7)
    tmp9 = tmp8 * tmp4
    tmp10 = triton_helpers.maximum(tmp5, tmp9)
    tmp13 = tl.where(tmp11, tmp2, tmp12)
    tmp14 = tmp13 * tmp4
    tmp15 = triton_helpers.maximum(tmp10, tmp14)
    tmp18 = tl.where(tmp16, tmp2, tmp17)
    tmp19 = tmp18 * tmp4
    tmp20 = triton_helpers.maximum(tmp15, tmp19)
    tmp21 = tmp5 - tmp20
    tmp22 = tmp21 * tmp4
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp9 - tmp20
    tmp25 = tmp24 * tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp23 + tmp26
    tmp28 = tmp14 - tmp20
    tmp29 = tmp28 * tmp4
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp27 + tmp30
    tmp32 = tmp19 - tmp20
    tmp33 = tmp32 * tmp4
    tmp34 = tl_math.exp(tmp33)
    tmp35 = tmp31 + tmp34
    tl.store(out_ptr0 + (x2), tmp20, xmask)
    tl.store(out_ptr1 + (x2), tmp35, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/s5/cs5gdmm3aovofwif2w4uihyrz2dnsg43d5yononneqsiaiy2q2eu.py
# Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_2], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
#   attn_weights_2 => div_3, exp_1
#   masked_fill_ => full_default_2
#   masked_fill__1 => where_2
# Graph fragment:
#   %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%view_33, %full_default_2, %bmm_2), kwargs = {})
#   %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 1), kwargs = {})
#   %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
#   %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
#   %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
#   %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
#   %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_masked_fill_9 = async_compile.triton('triton_poi_fused__softmax_masked_fill_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = (xindex // 16)
    x3 = xindex
    x4 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
    tmp2 = -1.0000000200408773e+20
    tmp3 = tl.where(tmp0, tmp2, tmp1)
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 - tmp6
    tmp8 = tmp7 * tmp4
    tmp9 = tl_math.exp(tmp8)
    tmp11 = tmp9 / tmp10
    tl.store(in_out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/cn/ccn3ztml5qupb2u6trhxfni5siq2chr5f7il63gpx775u3hi6iz5.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   x_4 => add_3
# Graph fragment:
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %view_37), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/s5/cs5qfpz5dtrtoopym4yheekk2kuetbhcvypaqjhns7stzzfhkjha.py
# Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   layer_norm_1 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
#   %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_38, [1]), kwargs = {correction: 0, keepdim: True})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
#   %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
    tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/dr/cdrqch7dkzd6wgbo46meh7vw2wmmm5avruav7pzwc4s4i4zleekn.py
# Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   layer_norm_1 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_4, var_mean_1
# Graph fragment:
#   %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_38, [1]), kwargs = {correction: 0, keepdim: True})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
#   %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_38, %getitem_3), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %rsqrt_1), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_22), kwargs = {})
#   %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_23), kwargs = {})
triton_poi_fused_native_layer_norm_12 = async_compile.triton('triton_poi_fused_native_layer_norm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/d2/cd2jzht7h5xjxoygvrqqi5z6h7ymzldxrfdzzcr3hgci55wxxltm.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_6 => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_41,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_relu_threshold_backward_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4, ), (1, ))
    assert_size_stride(primals_10, (4, ), (1, ))
    assert_size_stride(primals_11, (4, ), (1, ))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4, ), (1, ))
    assert_size_stride(primals_16, (4, 4), (4, 1))
    assert_size_stride(primals_17, (4, ), (1, ))
    assert_size_stride(primals_18, (4, 4), (4, 1))
    assert_size_stride(primals_19, (4, ), (1, ))
    assert_size_stride(primals_20, (4, 4), (4, 1))
    assert_size_stride(primals_21, (4, ), (1, ))
    assert_size_stride(primals_22, (4, ), (1, ))
    assert_size_stride(primals_23, (4, ), (1, ))
    assert_size_stride(primals_24, (4, 4), (4, 1))
    assert_size_stride(primals_25, (4, ), (1, ))
    assert_size_stride(primals_26, (4, 4), (4, 1))
    assert_size_stride(primals_27, (4, ), (1, ))
    assert_size_stride(primals_28, (4, ), (1, ))
    assert_size_stride(primals_29, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_3
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        del primals_4
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
        del primals_6
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
        triton_poi_fused_clone_0.run(buf3, primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_7
        buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
        triton_poi_fused_clone_0.run(buf2, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_5
        buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [dot_prod], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat]
        triton_poi_fused_repeat_1.run(buf7, 256, grid=grid(256), stream=stream0)
        buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0); del buf2  # reuse
        buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_masked_fill_2.run(buf7, buf6, buf8, buf9, 64, grid=grid(64), stream=stream0)
        buf10 = buf6; del buf6  # reuse
        # Topologically Sorted Source Nodes: [masked_fill_, attn_weights], Original ATen: [aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_masked_fill_3.run(buf10, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0)
        buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0); del buf9  # reuse
        # Topologically Sorted Source Nodes: [attentioned], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11)
        buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8  # reuse
        # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
        triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
        buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11  # reuse
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
        del primals_9
        buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
        buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
        # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_5.run(buf13, primals_1, buf14, buf15, 16, grid=grid(16), stream=stream0)
        buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_6.run(buf13, primals_1, buf14, buf15, primals_10, primals_11, buf16, 64, grid=grid(64), stream=stream0)
        del primals_11
        buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf16, reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf17)
        buf18 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone]
        triton_poi_fused_clone_0.run(buf17, primals_15, buf18, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_15
        buf19 = buf17; del buf17  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf19)
        del primals_16
        buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf20)
        del primals_18
        buf21 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone]
        triton_poi_fused_clone_0.run(buf20, primals_19, buf21, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_19
        buf22 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf20  # reuse
        # Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone]
        triton_poi_fused_clone_0.run(buf19, primals_17, buf22, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_17
        buf23 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [dot_prod_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 0, 1), 0), out=buf23)
        buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat]
        triton_poi_fused_repeat_7.run(primals_12, buf24, 64, grid=grid(64), stream=stream0)
        del primals_12
        buf25 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 64), 0); del buf19  # reuse
        buf26 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_2], Original ATen: [aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_masked_fill_8.run(buf24, buf23, buf25, buf26, 64, grid=grid(64), stream=stream0)
        buf27 = buf23; del buf23  # reuse
        # Topologically Sorted Source Nodes: [masked_fill_, masked_fill__1, attn_weights_2], Original ATen: [aten.masked_fill, aten._softmax]
        triton_poi_fused__softmax_masked_fill_9.run(buf27, buf24, buf25, buf26, 256, grid=grid(256), stream=stream0)
        buf28 = reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 1), 0); del buf26  # reuse
        # Topologically Sorted Source Nodes: [attentioned_2], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf27, reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 0), 0), out=buf28)
        buf29 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf25  # reuse
        # Topologically Sorted Source Nodes: [contiguous_7], Original ATen: [aten.clone]
        triton_poi_fused_clone_4.run(buf28, buf29, 16, 4, grid=grid(16, 4), stream=stream0)
        buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0); del buf28  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf30)
        buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0); del buf30  # reuse
        # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add]
        triton_poi_fused_add_10.run(buf31, buf16, primals_21, 64, grid=grid(64), stream=stream0)
        del primals_21
        buf32 = buf15; del buf15  # reuse
        buf33 = buf14; del buf14  # reuse
        # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_11.run(buf31, buf32, buf33, 16, grid=grid(16), stream=stream0)
        buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_12.run(buf31, buf32, buf33, primals_22, primals_23, buf34, 64, grid=grid(64), stream=stream0)
        del primals_23
        buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf34, reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf35)
        buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0); del buf35  # reuse
        buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_13.run(buf36, primals_25, buf42, 64, grid=grid(64), stream=stream0)
        del primals_25
        buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf37)
        buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0); del buf37  # reuse
        # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.add]
        triton_poi_fused_add_10.run(buf38, buf34, primals_27, 64, grid=grid(64), stream=stream0)
        del primals_27
        buf39 = buf33; del buf33  # reuse
        buf40 = buf32; del buf32  # reuse
        # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_11.run(buf38, buf39, buf40, 16, grid=grid(16), stream=stream0)
        buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_12.run(buf38, buf39, buf40, primals_28, primals_29, buf41, 64, grid=grid(64), stream=stream0)
        del buf39
        del buf40
        del primals_29
    return (reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_10, primals_22, primals_28, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), buf24, buf27, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf34, reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(buf38, (16, 4), (4, 1), 0), primals_26, buf42, primals_24, primals_20, reinterpret_tensor(buf21, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf18, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0), primals_14, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn.functional as F
from torch import nn


def _normalize(tensor, norm_layer):
    """ Broadcast layer norm """
    size = tensor.size()
    return norm_layer(tensor.view(-1, size[-1])).view(size)


class MultiHeadAttention(nn.Module):

    def __init__(self, n_heads, dim, dropout=0):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.dim = dim
        self.attn_dropout = nn.Dropout(p=dropout)
        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.q_lin.weight)
        nn.init.xavier_normal_(self.k_lin.weight)
        nn.init.xavier_normal_(self.v_lin.weight)
        self.out_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.out_lin.weight)

    def forward(self, query, key=None, value=None, mask=None):
        batch_size, query_len, dim = query.size()
        assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured'
        assert mask is not None, 'Mask is None, please specify a mask'
        n_heads = self.n_heads
        dim_per_head = dim // n_heads
        scale = math.sqrt(dim_per_head)

        def prepare_head(tensor):
            _bsz, seq_len, _ = tensor.size()
            tensor = tensor.view(batch_size, tensor.size(1), n_heads,
                dim_per_head)
            tensor = tensor.transpose(1, 2).contiguous().view(batch_size *
                n_heads, seq_len, dim_per_head)
            return tensor

        if key is None and value is None:
            key = value = query
        elif value is None:
            value = key
        _, key_len, dim = key.size()
        q = prepare_head(self.q_lin(query))
        k = prepare_head(self.k_lin(key))
        v = prepare_head(self.v_lin(value))
        dot_prod = q.bmm(k.transpose(1, 2))
        attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1,
            n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len
            ).view(batch_size * n_heads, query_len, key_len)
        assert attn_mask.shape == dot_prod.shape
        dot_prod.masked_fill_(attn_mask, -float(1e+20))
        attn_weights = F.softmax(dot_prod / scale, dim=-1)
        attn_weights = self.attn_dropout(attn_weights)
        attentioned = attn_weights.bmm(v)
        attentioned = attentioned.view(batch_size, n_heads, query_len,
            dim_per_head).transpose(1, 2).contiguous().view(batch_size,
            query_len, dim)
        out = self.out_lin(attentioned)
        return out


class TransformerFFN(nn.Module):

    def __init__(self, dim, dim_hidden, relu_dropout=0):
        super(TransformerFFN, self).__init__()
        self.relu_dropout = nn.Dropout(p=relu_dropout)
        self.lin1 = nn.Linear(dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, dim)
        nn.init.xavier_uniform_(self.lin1.weight)
        nn.init.xavier_uniform_(self.lin2.weight)

    def forward(self, x):
        x = F.relu(self.lin1(x))
        x = self.relu_dropout(x)
        x = self.lin2(x)
        return x


class TransformerDecoderLayer(nn.Module):

    def __init__(self, n_heads, embedding_size, ffn_size,
            attention_dropout=0.0, relu_dropout=0.0, dropout=0.0):
        super().__init__()
        self.dim = embedding_size
        self.ffn_dim = ffn_size
        self.dropout = nn.Dropout(p=dropout)
        self.self_attention = MultiHeadAttention(n_heads, embedding_size,
            dropout=attention_dropout)
        self.norm1 = nn.LayerNorm(embedding_size)
        self.encoder_attention = MultiHeadAttention(n_heads,
            embedding_size, dropout=attention_dropout)
        self.norm2 = nn.LayerNorm(embedding_size)
        self.ffn = TransformerFFN(embedding_size, ffn_size,
            relu_dropout=relu_dropout)
        self.norm3 = nn.LayerNorm(embedding_size)

    def forward(self, x, encoder_output, encoder_mask):
        decoder_mask = self._create_selfattn_mask(x)
        residual = x
        x = self.self_attention(query=x, mask=decoder_mask)
        x = self.dropout(x)
        x = x + residual
        x = _normalize(x, self.norm1)
        residual = x
        x = self.encoder_attention(query=x, key=encoder_output,
            value=encoder_output, mask=encoder_mask)
        x = self.dropout(x)
        x = residual + x
        x = _normalize(x, self.norm2)
        residual = x
        x = self.ffn(x)
        x = self.dropout(x)
        x = residual + x
        x = _normalize(x, self.norm3)
        return x

    def _create_selfattn_mask(self, x):
        bsz = x.size(0)
        time = x.size(1)
        mask = torch.tril(x.new(time, time).fill_(1))
        mask = mask.unsqueeze(0).expand(bsz, -1, -1)
        return mask


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
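A small illustration, not from the repo: _create_selfattn_mask builds a lower-triangular matrix of ones per batch element, and MultiHeadAttention converts mask == 0 into the boolean mask whose True entries are filled with -1e20 before the softmax, blocking attention to future positions.

import torch

time = 4
mask = torch.tril(torch.ones(time, time))  # what _create_selfattn_mask builds per batch row
blocked = mask == 0                        # what MultiHeadAttention fills with -1e20
print(blocked)
# tensor([[False,  True,  True,  True],
#         [False, False,  True,  True],
#         [False, False, False,  True],
#         [False, False, False, False]])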
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex tmp0 = x0 + -1 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp4 tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, 
XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4, 4), (4, 1)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (4, 4), (4, 1)) 
assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_7, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_5, buf5, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(256)](buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_9 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](buf13, primals_1, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](buf13, primals_1, buf14, buf15, primals_10, primals_11, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf17, primals_15, buf18, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_15 buf19 = buf17 del buf17 
extern_kernels.mm(reinterpret_tensor(primals_13, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf19) del primals_16 buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_13, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf20) del primals_18 buf21 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf20, primals_19, buf21, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_19 buf22 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf20 triton_poi_fused_clone_0[grid(16, 4)](buf19, primals_17, buf22, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_17 buf23 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 0, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_7[grid(64)](primals_12, buf24, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_12 buf25 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 64), 0) del buf19 buf26 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_8[grid(64)](buf24, buf23, buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = buf23 del buf23 triton_poi_fused__softmax_masked_fill_9[grid(256)](buf27, buf24, buf25, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 1), 0) del buf26 extern_kernels.bmm(buf27, reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf25 triton_poi_fused_clone_4[grid(16, 4)](buf28, buf29, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0) del buf28 extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0) del buf30 triton_poi_fused_add_10[grid(64)](buf31, buf16, primals_21, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_21 buf32 = buf15 del buf15 buf33 = buf14 del buf14 triton_poi_fused_native_layer_norm_11[grid(16)](buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf31, buf32, buf33, primals_22, primals_23, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_23 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf34, reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0) del buf35 buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_13[grid(64)](buf36, primals_25, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_25 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0) del buf37 triton_poi_fused_add_10[grid(64)](buf38, buf34, primals_27, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_27 buf39 = buf33 del buf33 buf40 = buf32 del buf32 triton_poi_fused_native_layer_norm_11[grid(16)](buf38, buf39, 
buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf38, buf39, buf40, primals_28, primals_29, buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf39 del buf40 del primals_29 return (reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_10, primals_22, primals_28, buf7, buf10, reinterpret_tensor (buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor( primals_13, (16, 4), (4, 1), 0), buf24, buf27, reinterpret_tensor( buf29, (16, 4), (4, 1), 0), reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf34, reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(buf38, (16, 4), (4, 1), 0), primals_26, buf42, primals_24, primals_20, reinterpret_tensor(buf21, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf18, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0), primals_14, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super(TransformerFFN, self).__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = 
F.relu(self.lin1(x))
        x = self.relu_dropout(x)
        x = self.lin2(x)
        return x


class TransformerDecoderLayerNew(nn.Module):

    def __init__(self, n_heads, embedding_size, ffn_size,
            attention_dropout=0.0, relu_dropout=0.0, dropout=0.0):
        super().__init__()
        self.dim = embedding_size
        self.ffn_dim = ffn_size
        self.dropout = nn.Dropout(p=dropout)
        self.self_attention = MultiHeadAttention(n_heads, embedding_size,
            dropout=attention_dropout)
        self.norm1 = nn.LayerNorm(embedding_size)
        self.encoder_attention = MultiHeadAttention(n_heads,
            embedding_size, dropout=attention_dropout)
        self.norm2 = nn.LayerNorm(embedding_size)
        self.ffn = TransformerFFN(embedding_size, ffn_size,
            relu_dropout=relu_dropout)
        self.norm3 = nn.LayerNorm(embedding_size)

    def _create_selfattn_mask(self, x):
        bsz = x.size(0)
        time = x.size(1)
        mask = torch.tril(x.new(time, time).fill_(1))
        mask = mask.unsqueeze(0).expand(bsz, -1, -1)
        return mask

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.self_attention.q_lin.weight
        primals_3 = self.self_attention.q_lin.bias
        primals_4 = self.self_attention.k_lin.weight
        primals_5 = self.self_attention.k_lin.bias
        primals_6 = self.self_attention.v_lin.weight
        primals_7 = self.self_attention.v_lin.bias
        primals_8 = self.self_attention.out_lin.weight
        primals_9 = self.self_attention.out_lin.bias
        primals_10 = self.norm1.weight
        primals_11 = self.norm1.bias
        # Argument positions expected by call(): primals_12 is the encoder
        # mask, primals_13 the encoder output, primals_14..21 the
        # encoder-attention q/k/v/out weights and biases, and primals_24..27
        # the FFN lin1/lin2 weights and biases.
        primals_14 = self.encoder_attention.q_lin.weight
        primals_15 = self.encoder_attention.q_lin.bias
        primals_16 = self.encoder_attention.k_lin.weight
        primals_17 = self.encoder_attention.k_lin.bias
        primals_18 = self.encoder_attention.v_lin.weight
        primals_19 = self.encoder_attention.v_lin.bias
        primals_20 = self.encoder_attention.out_lin.weight
        primals_21 = self.encoder_attention.out_lin.bias
        primals_22 = self.norm2.weight
        primals_23 = self.norm2.bias
        primals_24 = self.ffn.lin1.weight
        primals_25 = self.ffn.lin1.bias
        primals_26 = self.ffn.lin2.weight
        primals_27 = self.ffn.lin2.bias
        primals_28 = self.norm3.weight
        primals_29 = self.norm3.bias
        primals_1 = input_0
        primals_13 = input_1
        primals_12 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29])
        return output[0]
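A minimal sanity-check sketch, not part of the dataset row: assuming a CUDA device (the Inductor kernels above are compiled for CUDA) and identical weights, the fused TransformerDecoderLayerNew should match the eager TransformerDecoderLayer from the python_code listing above once dropout is disabled via eval().

import torch

eager = TransformerDecoderLayer(n_heads=4, embedding_size=4, ffn_size=4).cuda().eval()
fused = TransformerDecoderLayerNew(n_heads=4, embedding_size=4, ffn_size=4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # same weights in both modules

x = torch.rand(4, 4, 4, device='cuda')        # decoder input
enc_out = torch.rand(4, 4, 4, device='cuda')  # encoder output
enc_mask = torch.ones(4, 4, device='cuda')    # all encoder positions visible

with torch.no_grad():
    out_eager = eager(x, enc_out, enc_mask)
    out_fused = fused(x, enc_out, enc_mask)
assert torch.allclose(out_eager, out_fused, atol=1e-4)  # loose tolerance for float32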
FrankVerhoef/Persona-Dialogue-Generation
TransformerDecoderLayer
false
5209
[ "MIT" ]
1
ffd8413c2e8b6446097902dd1c496aeb24b852b4
https://github.com/FrankVerhoef/Persona-Dialogue-Generation/tree/ffd8413c2e8b6446097902dd1c496aeb24b852b4
import math import torch import torch.nn.functional as F from torch import nn def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super().__init__() self.n_heads = n_heads self.dim = dim self.attn_dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attn_weights = self.attn_dropout(attn_weights) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, relu_dropout=0): super().__init__() self.relu_dropout = nn.Dropout(p=relu_dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.relu_dropout(x) x = self.lin2(x) return x class Model(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0, dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.dropout = nn.Dropout(p=dropout) self.self_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.encoder_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout= relu_dropout) self.norm3 = nn.LayerNorm(embedding_size) def forward(self, x, encoder_output, encoder_mask): decoder_mask = self._create_selfattn_mask(x) residual = x x = # ... truncated (>4000 chars) for memory efficiency
FeedForward
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/p7/cp7qy7k3id7baiclou4lkuuzxq3f2zjcjcnoamy3rdevi3rsgjjj.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
#   gelu => add, erf, mul, mul_1, mul_2
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
#   %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 16), (16, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_gelu_0.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
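The kernel body above is the exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))). A quick sketch, illustrative and not from the repo, confirming that the kernel's constants reproduce PyTorch's reference implementation:

import torch
import torch.nn.functional as F

x = torch.randn(1024)
manual = 0.5 * x * (1 + torch.erf(x * 0.7071067811865476))  # same constants as the kernel
assert torch.allclose(manual, F.gelu(x), atol=1e-6)  # F.gelu defaults to the erf form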
import torch
import torch.nn.functional as F
from torch import nn


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout1(F.gelu(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4, 'expansion_factor': 4, 'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 16), (16, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
            (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16),
            0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0,
        reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4)


class FeedForwardNew(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
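A minimal sanity-check sketch, not part of the dataset row: assuming a CUDA device (the Inductor path above is CUDA-only) and identical weights, the fused FeedForwardNew should match the eager FeedForward in eval mode, where dropout is the identity; note the compiled call() never applies dropout regardless of mode.

import torch

ref = FeedForward(num_features=4, expansion_factor=4, dropout=0.5).cuda().eval()
fused = FeedForwardNew(num_features=4, expansion_factor=4, dropout=0.5).cuda().eval()
fused.load_state_dict(ref.state_dict())  # copy fc1/fc2 weights and biases

x = torch.rand(4, 4, 4, 4, device='cuda')  # call() asserts exactly this shape
with torch.no_grad():
    assert torch.allclose(ref(x), fused(x), atol=1e-5)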
GimmeSpoon/mlp-singer
FeedForward
false
5210
[ "MIT" ]
1
36d10a23c46fa7400994ccd063de79ff089efd5e
https://github.com/GimmeSpoon/mlp-singer/tree/36d10a23c46fa7400994ccd063de79ff089efd5e
import torch
import torch.nn.functional as F
from torch import nn


class Model(nn.Module):

    def __init__(self, num_features, expansion_factor, dropout):
        super().__init__()
        num_hidden = expansion_factor * num_features
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout1(F.gelu(self.fc1(x)))
        x = self.dropout2(self.fc2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 0.5]