Dataset schema (each row below lists these fields in this order):

  field                 type     range / size
  entry_point           string   1-65 chars
  original_triton_code  string   4.5k-619k chars
  python_code           string   208-60.9k chars
  triton_code           string   1.15k-275k chars
  repo_name             string   7-115 chars
  module_name           string   1-65 chars
  synthetic             bool     1 class
  uuid                  int64    0-18.5k
  licenses              list     1-6 entries
  stars                 int64    0-19.8k
  sha                   string   40 chars
  repo_link             string   72-180 chars
MarginCosineProduct
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ub/cubndnqsd3c7k3peegeanrq2reqgcl7uzhnt4z7syuddwnan5w36.py # Topologically Sorted Source Nodes: [w2, ger, clamp, cosine, scatter_, mul, sub, output], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.clamp, aten.div, aten.scatter, aten.sub] # Source node to ATen node mapping: # clamp => clamp_min # cosine => div # ger => mul # mul => mul_1 # output => mul_2 # scatter_ => scatter_upon_const_tensor # sub => sub # w2 => pow_3, pow_4, sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %pow_4), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, %clamp_min), kwargs = {}) # %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: 0, dtype: torch.float32, dim: 1, selector: %view_1, val: 1.0}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_upon_const_tensor, 0.4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 30.0), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp14 = tmp13 * tmp13 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp12 * tmp24 tmp26 = 1e-08 tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp28 = tmp0 / tmp27 tmp30 = x0 tmp31 = tmp29 == tmp30 tmp32 = 1.0 tmp33 = 0.0 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = 0.4 tmp36 = tmp34 * tmp35 tmp37 = tmp28 - tmp36 tmp38 = 30.0 tmp39 = tmp37 * tmp38 tl.store(in_out_ptr0 + (x2), tmp39, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ip], Original ATen: [aten.mm] extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [w2, ger, 
clamp, cosine, scatter_, mul, sub, output], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.clamp, aten.div, aten.scatter, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0.run(buf2, buf0, primals_2, primals_1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 return (buf2, primals_1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.utils.data
import torch.optim


def cosine_sim(x1, x2, dim=1, eps=1e-08):
    ip = torch.mm(x1, x2.t())
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return ip / torch.ger(w1, w2).clamp(min=eps)


class MarginCosineProduct(nn.Module):
    """Implementation of large margin cosine distance.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        s: norm of input feature
        m: margin
    """

    def __init__(self, in_features, out_features, s=30.0, m=0.4):
        super(MarginCosineProduct, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.s = s
        self.m = m
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, label):
        cosine = cosine_sim(input, self.weight)
        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1), 1.0)
        output = self.s * (cosine - one_hot * self.m)
        return output

    def __repr__(self):
        return (self.__class__.__name__ + '(' +
                'in_features=' + str(self.in_features) +
                ', out_features=' + str(self.out_features) +
                ', s=' + str(self.s) + ', m=' + str(self.m) + ')')


def get_inputs():
    return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0(in_out_ptr0 , in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp14 = tmp13 * tmp13 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp12 * tmp24 tmp26 = 1e-08 tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp28 = tmp0 / tmp27 tmp30 = x0 tmp31 = tmp29 == tmp30 tmp32 = 1.0 tmp33 = 0.0 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = 0.4 tmp36 = tmp34 * tmp35 tmp37 = tmp28 - tmp36 tmp38 = 30.0 tmp39 = tmp37 * tmp38 tl.store(in_out_ptr0 + x2, tmp39, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_scatter_sub_0[grid (16)](buf2, buf0, primals_2, primals_1, primals_3, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_3 return buf2, primals_1, primals_2, buf0 def cosine_sim(x1, x2, dim=1, eps=1e-08): ip = torch.mm(x1, x2.t()) w1 = torch.norm(x1, 2, dim) w2 = torch.norm(x2, 2, dim) return ip / torch.ger(w1, w2).clamp(min=eps) class MarginCosineProductNew(nn.Module): """Implement of large margin cosine distance: : Args: in_features: size of each input sample out_features: size of each output sample s: norm of input feature m: margin """ def __init__(self, in_features, out_features, s=30.0, m=0.4): super(MarginCosineProductNew, 
self).__init__() self.in_features = in_features self.out_features = out_features self.s = s self.m = m self.weight = Parameter(torch.Tensor(out_features, in_features)) nn.init.xavier_uniform_(self.weight) def __repr__(self): return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', s=' + str(self.s) + ', m=' + str(self.m) + ')' def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
lindsey98/CosFace_pytorch
MarginCosineProduct
false
10,397
[ "MIT" ]
0
39bddf763e06c7ccd21fbf45d0c7f1f4a9d8d24d
https://github.com/lindsey98/CosFace_pytorch/tree/39bddf763e06c7ccd21fbf45d0c7f1f4a9d8d24d
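A minimal usage sketch for the MarginCosineProduct row above, with the shapes from its get_inputs()/get_init_inputs(). It assumes the MarginCosineProduct class from the python_code field is in scope; the names head, features, and labels are illustrative, not part of the dataset:

import torch
import torch.nn as nn

head = MarginCosineProduct(in_features=4, out_features=4)  # s=30.0, m=0.4
features = torch.rand(4, 4)
labels = torch.ones(4, dtype=torch.int64)

# forward() returns s * (cosine - m * one_hot(label)): the margin m is
# subtracted only from each sample's target-class cosine similarity.
logits = head(features, labels)
loss = nn.CrossEntropyLoss()(logits, labels)
loss.backward()
print(logits.shape)  # torch.Size([4, 4])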
SplitDim
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/mt/cmttqkny6sfnxpzdxvh5ov2uac5527dmp3f2ypze3tkvqwdngpc2.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] # Source node to ATen node mapping: # output => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_3, %view, %slice_5], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp16 = tl.load(in_ptr1 + (0)) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = 
tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 20.0 tmp12 = tmp10 > tmp11 tmp13 = tl_math.exp(tmp10) tmp14 = libdevice.log1p(tmp13) tmp15 = tl.where(tmp12, tmp10, tmp14) tmp18 = tmp17 > tmp11 tmp19 = tl_math.exp(tmp17) tmp20 = libdevice.log1p(tmp19) tmp21 = tl.where(tmp18, tmp17, tmp20) tmp22 = tmp15 + tmp21 tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp9, tmp22, tmp23) tmp25 = tmp0 >= tmp7 tmp26 = tl.full([1], 4, tl.int64) tmp27 = tmp0 < tmp26 tmp28 = tl.load(in_ptr0 + (2 + (4*x1) + ((-2) + x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.where(tmp9, tmp24, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn as nn
import torch.utils.data


class SplitDim(nn.Module):

    def __init__(self, nonlin_col=1, nonlin_type=torch.nn.functional.softplus,
                 correction=True):
        super(SplitDim, self).__init__()
        self.nonlinearity = nonlin_type
        self.col = nonlin_col
        if correction:
            self.var = torch.nn.Parameter(torch.zeros(1))
        else:
            self.register_buffer('var',
                                 torch.ones(1, requires_grad=False) * -15.0)
        self.correction = correction

    def forward(self, input):
        transformed_output = self.nonlinearity(input[:, self.col])
        transformed_output = transformed_output + self.nonlinearity(self.var)
        stack_list = [input[:, :self.col], transformed_output.view(-1, 1)]
        if self.col + 1 < input.size(1):
            stack_list.append(input[:, self.col + 1:])
        output = torch.cat(stack_list, 1)
        return output


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = 20.0 tmp12 = tmp10 > tmp11 tmp13 = tl_math.exp(tmp10) tmp14 = libdevice.log1p(tmp13) tmp15 = tl.where(tmp12, tmp10, tmp14) tmp18 = tmp17 > tmp11 tmp19 = tl_math.exp(tmp17) tmp20 = libdevice.log1p(tmp19) tmp21 = tl.where(tmp18, tmp17, tmp20) tmp22 = tmp15 + tmp21 tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp9, tmp22, tmp23) tmp25 = tmp0 >= tmp7 tl.full([1], 4, tl.int64) tmp28 = tl.load(in_ptr0 + (2 + 4 * x1 + (-2 + x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.where(tmp9, tmp24, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 return buf0, primals_2 class SplitDimNew(nn.Module): def __init__(self, nonlin_col=1, nonlin_type=torch.nn.functional. softplus, correction=True): super(SplitDimNew, self).__init__() self.nonlinearity = nonlin_type self.col = nonlin_col if correction: self.var = torch.nn.Parameter(torch.zeros(1)) else: self.register_buffer('var', torch.ones(1, requires_grad=False) * -15.0) self.correction = correction def forward(self, input_0): primals_2 = self.var primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
junmokane/rlkit_jm
SplitDim
false
10,398
[ "MIT" ]
0
34a1bcf47706d4c98e9ce3b7edfd96fee6f2dd70
https://github.com/junmokane/rlkit_jm/tree/34a1bcf47706d4c98e9ce3b7edfd96fee6f2dd70
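A quick sanity check of the SplitDim row above, again using the shape from its get_inputs(). It assumes the SplitDim class from the python_code field is in scope; layer, x, and y are illustrative names:

import torch
import torch.nn.functional as F

layer = SplitDim()  # nonlin_col=1, softplus nonlinearity, correction=True
x = torch.rand(4, 4)
y = layer(x)

# Only column 1 is transformed: softplus(x[:, 1]) + softplus(layer.var);
# every other column passes through unchanged. The fused Triton kernel
# above uses the same threshold-20 softplus guard as F.softplus
# (x > 20 ? x : log1p(exp(x))).
assert y.shape == x.shape
assert torch.allclose(y[:, 0], x[:, 0])
assert torch.allclose(y[:, 2:], x[:, 2:])
assert torch.allclose(y[:, 1], F.softplus(x[:, 1]) + F.softplus(layer.var))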
StyledConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1.0), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xb/cxbwntk4p6rlewwyy2lsbaujvvugjyjfuideulqmjkkz6oa23ili.py # Topologically Sorted Source Nodes: [mul_2, weight_1, pow_1, sum_1, add, rsqrt, weight_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # mul_2 => mul_2 # pow_1 => pow_1 # rsqrt => rsqrt # sum_1 => sum_1 # weight_1 => mul_3 # weight_2 => mul_4 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %slice_4), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %rsqrt), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = (rindex // 16) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sv/csv7dxittkbvogm6j67l67lwlrg4sqq5fffvklyqhs4hv3hahubs.py # Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_2 => add_2 # leaky_relu => gt, mul_6, where # mul_5 => mul_5 # out_3 => add_1 # out_4 => mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %normal_functional), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %mul_5), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %view_6), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_6), kwargs = {}) # %mul_7 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.4142135623730951), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 25 x2 = (xindex // 100) x1 = (xindex // 25) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + (25*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = 1.4142135623730951 tmp14 = tmp12 * tmp13 tmp15 = tmp12 > tmp8 tl.store(out_ptr0 + (x3), tmp14, xmask) tl.store(out_ptr1 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_6, (1, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), 
stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0); del buf0 # reuse buf4 = reinterpret_tensor(buf3, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight_1, pow_1, sum_1, add, rsqrt, weight_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional] buf8 = torch.ops.aten.normal_functional.default(buf7) del buf7 buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) # Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3.run(buf6, primals_6, buf9, primals_7, buf10, buf11, 400, grid=grid(400), stream=stream0) del buf6 del primals_6 del primals_7 return (buf10, primals_5, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (64, 64, 1, 1, 1), 0), buf4, reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), buf9, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F from torch.nn.functional import leaky_relu def fused_leaky_relu(input_, bias, negative_slope=0.2, scale=2 ** 0.5): return scale * leaky_relu(input_ + bias[:input_.shape[1]], negative_slope, inplace=True) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() k = torch.flip(k, [0, 1]) return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, ch, _in_h, _in_w = input.shape kernel_h, kernel_w = kernel.shape assert up_y == up_x and up_y in [1, 2] if up_y == 2: w = input.new_zeros(2, 2) w[0, 0] = 1 out = F.conv_transpose2d(input, w.view(1, 1, 2, 2).repeat(ch, 1, 1, 1), groups=ch, stride=2) else: out = input out = F.pad(out, [pad_x0, pad_x1, pad_y0, pad_y1]) out = F.conv2d(out, kernel.view(1, 1, kernel_h, kernel_w).repeat(ch, 1, 1, 1), groups=ch) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, x): return self.scale * leaky_relu(x + self.bias.reshape((1, -1, 1, 1)) [:, :x.shape[1]], self.negative_slope, inplace=True) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, x): if self.activation: out = F.linear(x, self.weight * self.scale) if self.activation == 'lrelu': out = fused_leaky_relu(out, self.bias * self.lr_mul) else: raise NotImplementedError else: out = F.linear(x, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): out = upfirdn2d(x, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=(1, 3, 3, 1)): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample assert not downsample, 'Downsample is not implemented yet!' 
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) self.blur = Blur(blur_kernel, pad=((p + 1) // 2 + factor - 1, p // 2 + 1), upsample_factor=factor) self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, x, style): batch, in_channel, height, width = x.shape style = self.modulation(style) style = style.view(batch, 1, -1, 1, 1) first_k_oup = self.first_k_oup if hasattr(self, 'first_k_oup' ) and self.first_k_oup is not None else self.weight.shape[1] assert first_k_oup <= self.weight.shape[1] weight = self.weight weight = weight[:, :first_k_oup, :in_channel].contiguous() weight = self.scale * weight * style[:, :, :in_channel] if self.demodulate: weight = weight * torch.rsqrt(weight.pow(2).sum([2, 3, 4], keepdim=True) + self.eps) if self.upsample: x = x.view(1, batch * in_channel, height, width) weight = weight.transpose(1, 2) weight = weight.reshape(weight.shape[0] * weight.shape[1], weight.shape[2], weight.shape[3], weight.shape[4]) out = F.conv_transpose2d(x, weight, padding=0, stride=2, groups =batch) out = out.view(batch, -1, out.shape[-2], out.shape[-1]) out = self.blur(out) else: x = x.contiguous().view(1, batch * in_channel, height, width) weight = weight.view(weight.shape[0] * weight.shape[1], weight. shape[2], weight.shape[3], weight.shape[4]) out = F.conv2d(x, weight, padding=self.padding, groups=batch) out = out.view(batch, -1, out.shape[-2], out.shape[-1]) return out class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConv(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=(1, 3, 3, 1), demodulate=True, activation='lrelu'): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() if activation == 'lrelu': self.activate = FusedLeakyReLU(out_channel) else: raise NotImplementedError def forward(self, x, style, noise=None): out = self.conv(x, style) out = self.noise(out, noise=noise) out = self.activate(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch import nn from torch.nn import functional as F from torch.nn.functional import leaky_relu assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 25 x2 = xindex // 100 x1 = xindex // 25 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = 1.4142135623730951 tmp14 = tmp12 * tmp13 tmp15 = tmp12 > tmp8 tl.store(out_ptr0 + x3, tmp14, xmask) tl.store(out_ptr1 + x3, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 
16, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_4, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf0 buf4 = reinterpret_tensor(buf3, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32) buf8 = torch.ops.aten.normal_functional.default(buf7) del buf7 buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32 ) buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3[grid(400)]( buf6, primals_6, buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_6 del primals_7 return buf10, primals_5, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (64, 64, 1, 1, 1), 0 ), buf4, reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0 ), buf9, buf11 def fused_leaky_relu(input_, bias, negative_slope=0.2, scale=2 ** 0.5): return scale * leaky_relu(input_ + bias[:input_.shape[1]], negative_slope, inplace=True) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() k = torch.flip(k, [0, 1]) return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, ch, _in_h, _in_w = input.shape kernel_h, kernel_w = kernel.shape assert up_y == up_x and up_y in [1, 2] if up_y == 2: w = input.new_zeros(2, 2) w[0, 0] = 1 out = F.conv_transpose2d(input, w.view(1, 1, 2, 2).repeat(ch, 1, 1, 1), groups=ch, stride=2) else: out = input out = F.pad(out, [pad_x0, pad_x1, pad_y0, pad_y1]) out = F.conv2d(out, kernel.view(1, 1, kernel_h, kernel_w).repeat(ch, 1, 1, 1), groups=ch) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale 
def forward(self, x): return self.scale * leaky_relu(x + self.bias.reshape((1, -1, 1, 1)) [:, :x.shape[1]], self.negative_slope, inplace=True) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, x): if self.activation: out = F.linear(x, self.weight * self.scale) if self.activation == 'lrelu': out = fused_leaky_relu(out, self.bias * self.lr_mul) else: raise NotImplementedError else: out = F.linear(x, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): out = upfirdn2d(x, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=(1, 3, 3, 1)): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample assert not downsample, 'Downsample is not implemented yet!' 
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) self.blur = Blur(blur_kernel, pad=((p + 1) // 2 + factor - 1, p // 2 + 1), upsample_factor=factor) self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, x, style): batch, in_channel, height, width = x.shape style = self.modulation(style) style = style.view(batch, 1, -1, 1, 1) first_k_oup = self.first_k_oup if hasattr(self, 'first_k_oup' ) and self.first_k_oup is not None else self.weight.shape[1] assert first_k_oup <= self.weight.shape[1] weight = self.weight weight = weight[:, :first_k_oup, :in_channel].contiguous() weight = self.scale * weight * style[:, :, :in_channel] if self.demodulate: weight = weight * torch.rsqrt(weight.pow(2).sum([2, 3, 4], keepdim=True) + self.eps) if self.upsample: x = x.view(1, batch * in_channel, height, width) weight = weight.transpose(1, 2) weight = weight.reshape(weight.shape[0] * weight.shape[1], weight.shape[2], weight.shape[3], weight.shape[4]) out = F.conv_transpose2d(x, weight, padding=0, stride=2, groups =batch) out = out.view(batch, -1, out.shape[-2], out.shape[-1]) out = self.blur(out) else: x = x.contiguous().view(1, batch * in_channel, height, width) weight = weight.view(weight.shape[0] * weight.shape[1], weight. shape[2], weight.shape[3], weight.shape[4]) out = F.conv2d(x, weight, padding=self.padding, groups=batch) out = out.view(batch, -1, out.shape[-2], out.shape[-1]) return out class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConvNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=(1, 3, 3, 1), demodulate=True, activation='lrelu'): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() if activation == 'lrelu': self.activate = FusedLeakyReLU(out_channel) else: raise NotImplementedError def forward(self, input_0, input_1): primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_6 = self.noise.weight primals_7 = self.activate.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
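# Hedged usage sketch (not part of the original row). A minimal smoke test for
# the compiled StyledConvNew wrapper above; the constructor arguments are
# inferred from the shape guards in call(): conv.weight (1, 4, 4, 4, 4) implies
# in_channel=out_channel=kernel_size=4, and modulation.weight (4, 4) implies
# style_dim=4. A CUDA device is assumed, since the kernels target it.
if __name__ == '__main__' and torch.cuda.is_available():
    conv = StyledConvNew(in_channel=4, out_channel=4, kernel_size=4,
        style_dim=4).cuda()
    feat = torch.rand(4, 4, 4, 4, device='cuda')   # input_0: feature map
    style = torch.rand(4, 4, 4, 4, device='cuda')  # input_1: style code
    out = conv(feat, style)  # (4, 4, 5, 5) after the padded 4x4 convolution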
jchetboun/anycost-gan
StyledConv
false
10,399
[ "MIT" ]
0
7e0005e50b915e2dfeb90fe7a9846c5df38d7c06
https://github.com/jchetboun/anycost-gan/tree/7e0005e50b915e2dfeb90fe7a9846c5df38d7c06
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/f2/cf2m6qhcmiwbtav7gxsyv32awtl7srngldst5zjqkg6jzhbuk257.py # Topologically Sorted Source Nodes: [mul, intersect, mul_1, add, denominator, clamp, per_channel_dice, sub, mean], Original ATen: [aten.mul, aten.sum, aten.add, aten.clamp, aten.div, aten.rsub, aten.mean] # Source node to ATen node mapping: # add => add # clamp => clamp_min # denominator => sum_2 # intersect => sum_1 # mean => mean # mul => mul # mul_1 => mul_1 # per_channel_dice => div # sub => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [-1]), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_2, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %clamp_min), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {}) triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp4 = tl.load(in_ptr0 + (4 + r0), None) tmp6 = tl.load(in_ptr1 + (4 + r0), None) tmp9 = tl.load(in_ptr0 + (8 + r0), None) tmp11 = tl.load(in_ptr1 + (8 + r0), None) tmp14 = tl.load(in_ptr0 + (12 + r0), None) tmp16 = tl.load(in_ptr1 + (12 + r0), None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tl.sigmoid(tmp9) tmp12 = tmp10 * tmp11 tmp13 = tmp8 + tmp12 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 2.0 tmp20 = tmp18 * tmp19 tmp21 = tmp1 + tmp2 tmp22 = tmp5 + tmp6 tmp23 = tmp21 + tmp22 tmp24 = tmp10 + tmp11 tmp25 = tmp23 + tmp24 tmp26 = tmp15 + tmp16 tmp27 = tmp25 + tmp26 tmp28 = 1e-05 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = tmp20 / tmp29 tmp31 = 1.0 tmp32 = tmp31 - tmp30 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 4.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, intersect, mul_1, add, denominator, clamp, per_channel_dice, sub, mean], Original ATen: [aten.mul, aten.sum, aten.add, aten.clamp, aten.div, aten.rsub, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0.run(buf2, arg0_1, arg1_1, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torch.autograd import Variable


def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.
    The shapes are transformed as follows:
    (N, C, D, H, W) -> (C, N * D * H * W)
    """
    C = tensor.size(1)
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    transposed = tensor.permute(axis_order)
    return transposed.view(C, -1)


def compute_per_channel_dice(input, target, epsilon=1e-05, ignore_index=None,
        weight=None):
    assert input.size() == target.size(), \
        "'input' and 'target' must have the same shape"
    if ignore_index is not None:
        mask = target.clone().ne_(ignore_index)
        mask.requires_grad = False
        input = input * mask
        target = target * mask
    input = flatten(input)
    target = flatten(target)
    target = target.float()
    intersect = (input * target).sum(-1)
    if weight is not None:
        intersect = weight * intersect
    denominator = (input + target).sum(-1)
    return 2.0 * intersect / denominator.clamp(min=epsilon)


class DiceLoss(nn.Module):
    """Computes Dice Loss, which is just 1 - DiceCoefficient described above.
    Additionally allows per-class weights to be provided.
    """

    def __init__(self, epsilon=1e-05, weight=None, ignore_index=None,
            sigmoid_normalization=True, skip_last_target=False):
        super(DiceLoss, self).__init__()
        self.epsilon = epsilon
        self.register_buffer('weight', weight)
        self.ignore_index = ignore_index
        if sigmoid_normalization:
            self.normalization = nn.Sigmoid()
        else:
            self.normalization = nn.Softmax(dim=1)
        self.skip_last_target = skip_last_target

    def forward(self, input, target):
        input = self.normalization(input)
        if self.weight is not None:
            weight = Variable(self.weight, requires_grad=False)
        else:
            weight = None
        if self.skip_last_target:
            target = target[:, :-1, ...]
        per_channel_dice = compute_per_channel_dice(input, target,
            epsilon=self.epsilon, ignore_index=self.ignore_index,
            weight=weight)
        return torch.mean(1.0 - per_channel_dice)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
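# Hedged worked example (not from the source). Sanity check for
# compute_per_channel_dice: with a binary target scored against itself,
# intersect == sum(t) and denominator == 2 * sum(t) per channel, so the dice
# score is exactly 1 for every channel. Note that DiceLoss itself first passes
# `input` through sigmoid/softmax, so a raw 0/1 prediction does not reach a
# loss of exactly 0.
if __name__ == '__main__':
    t = torch.tensor([[[1.0, 0.0], [0.0, 1.0]]])  # (N=1, C=2, W=2)
    print(compute_per_channel_dice(t, t))  # tensor([1., 1.])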
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr0 + (4 + r0), None) tmp6 = tl.load(in_ptr1 + (4 + r0), None) tmp9 = tl.load(in_ptr0 + (8 + r0), None) tmp11 = tl.load(in_ptr1 + (8 + r0), None) tmp14 = tl.load(in_ptr0 + (12 + r0), None) tmp16 = tl.load(in_ptr1 + (12 + r0), None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = tl.sigmoid(tmp9) tmp12 = tmp10 * tmp11 tmp13 = tmp8 + tmp12 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 2.0 tmp20 = tmp18 * tmp19 tmp21 = tmp1 + tmp2 tmp22 = tmp5 + tmp6 tmp23 = tmp21 + tmp22 tmp24 = tmp10 + tmp11 tmp25 = tmp23 + tmp24 tmp26 = tmp15 + tmp16 tmp27 = tmp25 + tmp26 tmp28 = 1e-05 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = tmp20 / tmp29 tmp31 = 1.0 tmp32 = tmp31 - tmp30 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = 4.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_clamp_div_mean_mul_rsub_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, def flatten(tensor): """Flattens a given tensor such that the channel axis is first. The shapes are transformed as follows: (N, C, D, H, W) -> (C, N * D * H * W) """ C = tensor.size(1) axis_order = (1, 0) + tuple(range(2, tensor.dim())) transposed = tensor.permute(axis_order) return transposed.view(C, -1) def compute_per_channel_dice(input, target, epsilon=1e-05, ignore_index= None, weight=None): assert input.size() == target.size( ), "'input' and 'target' must have the same shape" if ignore_index is not None: mask = target.clone().ne_(ignore_index) mask.requires_grad = False input = input * mask target = target * mask input = flatten(input) target = flatten(target) target = target.float() intersect = (input * target).sum(-1) if weight is not None: intersect = weight * intersect denominator = (input + target).sum(-1) return 2.0 * intersect / denominator.clamp(min=epsilon) class DiceLossNew(nn.Module): """Computes Dice Loss, which just 1 - DiceCoefficient described above. Additionally allows per-class weights to be provided. 
""" def __init__(self, epsilon=1e-05, weight=None, ignore_index=None, sigmoid_normalization=True, skip_last_target=False): super(DiceLossNew, self).__init__() self.epsilon = epsilon self.register_buffer('weight', weight) self.ignore_index = ignore_index if sigmoid_normalization: self.normalization = nn.Sigmoid() else: self.normalization = nn.Softmax(dim=1) self.skip_last_target = skip_last_target def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
joowlim/pytorch-3dunet
DiceLoss
false
10,400
[ "MIT" ]
0
d08049f60b619627521efd0fb171247e1536b262
https://github.com/joowlim/pytorch-3dunet/tree/d08049f60b619627521efd0fb171247e1536b262
InferenceNetLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ys/cys3jebwwb7bbssorkkzehii3rj2g54ysfj5qycyaqaktbfse5sq.py # Topologically Sorted Source Nodes: [i_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # i_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + 
(x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr4 + (x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7h/c7hxidbimuulhwlkxrqzggjodto5os62ortmymrajg4mwpsflkh7.py # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # f_1 => sigmoid_1 # Graph fragment: # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr4 + (4 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr5 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bd/cbdel7ieyqyotjj3l56pbhbkdykcl7u4krkncs4ch6tzxqucyzky.py # Topologically Sorted Source Nodes: [o_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # o_1 => sigmoid_3 # Graph fragment: # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_3,), kwargs = {}) triton_poi_fused_sigmoid_2 = 
async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (12 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr4 + (12 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr5 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/um/cumvkqrcq4v7n6s35jolkn2wu5c7x5ct3me73ev7qzlxjowcziea.py # Topologically Sorted Source Nodes: [mul_2, mul_3, new_c, tanh, new_h, new_h_2, new_c_1], Original ATen: [aten.mul, aten.add, aten.tanh, aten.native_layer_norm] # Source node to ATen node mapping: # mul_2 => mul_2 # mul_3 => mul_3 # new_c => add_2 # new_c_1 => var_mean_1 # new_h => mul_4 # new_h_2 => var_mean # tanh => tanh # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_18), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_18), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %tanh), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mul_4, [3]), kwargs = {correction: 0, keepdim: True}) # %var_mean_1 : [num_users=2] = 
call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp0 * tmp7 tmp12 = tmp10 * tmp11 tmp14 = tmp13 * tmp11 tmp15 = tmp12 + tmp14 tmp16 = libdevice.tanh(tmp15) tmp17 = tmp9 * tmp16 tmp18 = tmp8 + tmp17 tmp22 = tmp20 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp22 + tmp24 
tmp26 = libdevice.tanh(tmp25) tmp27 = tmp19 * tmp26 tmp28 = tmp18 + tmp27 tmp32 = tmp30 * tmp31 tmp34 = tmp33 * tmp31 tmp35 = tmp32 + tmp34 tmp36 = libdevice.tanh(tmp35) tmp37 = tmp29 * tmp36 tmp38 = tmp28 + tmp37 tmp39 = 4.0 tmp40 = tmp38 / tmp39 tmp41 = tmp8 - tmp40 tmp42 = tmp41 * tmp41 tmp43 = tmp17 - tmp40 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp27 - tmp40 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp37 - tmp40 tmp50 = tmp49 * tmp49 tmp51 = tmp48 + tmp50 tmp52 = tmp51 / tmp39 tmp53 = tmp6 + tmp15 tmp54 = tmp53 + tmp25 tmp55 = tmp54 + tmp35 tmp56 = tmp55 / tmp39 tmp57 = tmp6 - tmp56 tmp58 = tmp57 * tmp57 tmp59 = tmp15 - tmp56 tmp60 = tmp59 * tmp59 tmp61 = tmp58 + tmp60 tmp62 = tmp25 - tmp56 tmp63 = tmp62 * tmp62 tmp64 = tmp61 + tmp63 tmp65 = tmp35 - tmp56 tmp66 = tmp65 * tmp65 tmp67 = tmp64 + tmp66 tmp68 = tmp67 / tmp39 tl.store(out_ptr0 + (x0), tmp40, xmask) tl.store(out_ptr1 + (x0), tmp52, xmask) tl.store(out_ptr2 + (x0), tmp56, xmask) tl.store(out_ptr3 + (x0), tmp68, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wr/cwrax6jmtc4cdewawrgq5qodxervs2qo3z2th4qqtdt4jjlavqes.py # Topologically Sorted Source Nodes: [mul_2, mul_3, new_c, tanh, new_h, new_h_2, new_c_1], Original ATen: [aten.mul, aten.add, aten.tanh, aten.native_layer_norm] # Source node to ATen node mapping: # mul_2 => mul_2 # mul_3 => mul_3 # new_c => add_2 # new_c_1 => add_5, add_6, mul_7, mul_8, rsqrt_1, sub_1 # new_h => mul_4 # new_h_2 => add_3, add_4, mul_5, mul_6, rsqrt, sub # tanh => tanh # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_18), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_18), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %tanh), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %getitem_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_19), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_20), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_7), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %primals_21), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %primals_22), kwargs = {}) 
triton_poi_fused_add_mul_native_layer_norm_tanh_4 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_tanh_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x2), xmask) tmp9 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr10 + (x0), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr11 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp0 * tmp7 tmp10 = tmp8 - tmp9 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp10 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp6 - tmp20 tmp23 = tmp22 + tmp12 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp21 * tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + (x2), tmp19, xmask) tl.store(out_ptr1 + (x2), tmp29, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, 
primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16, ), (1, )) assert_size_stride(primals_12, (16, 4), (4, 1)) assert_size_stride(primals_13, (16, ), (1, )) assert_size_stride(primals_14, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16, ), (1, )) assert_size_stride(primals_17, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_18, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_19, (4, ), (1, )) assert_size_stride(primals_20, (4, ), (1, )) assert_size_stride(primals_21, (4, ), (1, )) assert_size_stride(primals_22, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [z_h], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [z_x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [z_bias], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [d_z_h], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [d_z_x], Original ATen: [aten.mm] extern_kernels.mm(buf1, reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_12 del primals_13 buf7 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, reinterpret_tensor(primals_17, (64, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_15 del primals_16 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [i_1], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf3, buf6, buf4, buf7, buf5, primals_11, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [f_1], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf3, buf6, buf4, buf7, buf5, primals_11, buf9, 256, grid=grid(256), stream=stream0) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [o_1], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf3, buf6, buf4, buf7, buf5, primals_11, buf10, 256, grid=grid(256), stream=stream0) del buf5 del primals_11 buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf15 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [mul_2, mul_3, new_c, tanh, new_h, new_h_2, new_c_1], Original ATen: [aten.mul, aten.add, aten.tanh, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_tanh_3.run(buf10, buf9, primals_18, buf8, buf11, buf12, buf14, buf15, 64, grid=grid(64), stream=stream0) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, mul_3, new_c, tanh, new_h, new_h_2, new_c_1], Original ATen: [aten.mul, aten.add, aten.tanh, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_tanh_4.run(buf10, buf9, primals_18, buf8, buf11, buf12, primals_19, primals_20, buf14, buf15, primals_21, primals_22, buf13, buf16, 256, grid=grid(256), stream=stream0) del buf11 del buf12 del buf14 del buf15 del primals_20 del primals_22 return (buf13, buf16, primals_18, primals_19, primals_21, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf3, buf1, buf4, buf2, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(primals_17, (64, 4), (4, 1), 0), buf7, buf8, buf9, buf10, primals_10, primals_9, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_15 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class InferenceNetLSTMCell(nn.Module):

    def __init__(self, z_dim: 'int', input_dim: 'int', hidden_hat_dim:
            'int', hidden_dim: 'int'):
        super(InferenceNetLSTMCell, self).__init__()
        self.w_hh = nn.Linear(hidden_hat_dim, z_dim)
        self.w_hx = nn.Linear(hidden_hat_dim, z_dim)
        self.w_hb = nn.Linear(hidden_hat_dim, z_dim)
        self.W_hz = nn.Linear(z_dim, 4 * hidden_dim, bias=False)
        self.W_xz = nn.Linear(z_dim, 4 * hidden_dim, bias=False)
        self.b = nn.Linear(z_dim, 4 * hidden_dim)
        self.Wh = nn.Linear(hidden_dim, 4 * hidden_dim)
        self.Wx = nn.Linear(input_dim, 4 * hidden_dim)
        self.dropout = nn.Dropout(p=0.1, inplace=True)
        self.norm_h = nn.LayerNorm(hidden_dim)
        self.norm_c = nn.LayerNorm(hidden_dim)

    def forward(self, h_t, c, h_t_hat, inf_inputs):
        z_h = self.w_hh(h_t_hat)
        z_x = self.w_hx(h_t_hat)
        z_bias = self.w_hb(h_t_hat)
        d_z_h = self.W_hz(z_h)
        d_z_x = self.W_xz(z_x)
        b_z_b = self.b(z_bias)
        ifgo = d_z_h * self.Wh(h_t) + d_z_x * self.Wx(inf_inputs) + b_z_b
        i, f, g, o = torch.chunk(ifgo, 4, -1)
        i = torch.sigmoid(i)
        f = torch.sigmoid(f)
        # Note: the source applies sigmoid (not the usual tanh) to the g gate
        # and then never uses it -- the cell update below reuses c for both
        # terms (f * c + i * c) instead of the standard f * c + i * g. The
        # compiled kernels in the next field reproduce exactly this, which is
        # why they only materialise the i, f and o chunks.
        g = torch.sigmoid(g)
        o = torch.sigmoid(o)
        new_c = f * c + i * c
        new_h = o * torch.tanh(new_c)
        new_h = self.dropout(new_h)
        new_h = self.norm_h(new_h)
        new_c = self.norm_c(new_c)
        return new_h, new_c


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'z_dim': 4, 'input_dim': 4, 'hidden_hat_dim': 4,
        'hidden_dim': 4}]
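# Hedged smoke test (not from the source). Exercises the eager cell with the
# shapes supplied by get_inputs()/get_init_inputs(); runs on CPU, no Triton
# involved.
if __name__ == '__main__':
    cell = InferenceNetLSTMCell(**get_init_inputs()[1])
    new_h, new_c = cell(*get_inputs())
    print(new_h.shape, new_c.shape)  # torch.Size([4, 4, 4, 4]) twice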
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + (x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr4 + (x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tl.store(out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr4 + (4 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr5 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tl.store(out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + (12 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr4 + (12 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr5 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tl.store(out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 
+ 4 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp0 * tmp7 tmp12 = tmp10 * tmp11 tmp14 = tmp13 * tmp11 tmp15 = tmp12 + tmp14 tmp16 = libdevice.tanh(tmp15) tmp17 = tmp9 * tmp16 tmp18 = tmp8 + tmp17 tmp22 = tmp20 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp22 + tmp24 tmp26 = libdevice.tanh(tmp25) tmp27 = tmp19 * tmp26 tmp28 = tmp18 + tmp27 tmp32 = tmp30 * tmp31 tmp34 = tmp33 * tmp31 tmp35 = tmp32 + tmp34 tmp36 = libdevice.tanh(tmp35) tmp37 = tmp29 * tmp36 tmp38 = tmp28 + tmp37 tmp39 = 4.0 tmp40 = tmp38 / tmp39 tmp41 = tmp8 - tmp40 tmp42 = tmp41 * tmp41 tmp43 = tmp17 - tmp40 tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = tmp27 - tmp40 tmp47 = tmp46 * tmp46 tmp48 = tmp45 + tmp47 tmp49 = tmp37 - tmp40 tmp50 = tmp49 * tmp49 tmp51 = tmp48 + tmp50 tmp52 = tmp51 / tmp39 tmp53 = tmp6 + tmp15 tmp54 = tmp53 + tmp25 tmp55 = tmp54 + tmp35 tmp56 = tmp55 / tmp39 tmp57 = tmp6 - tmp56 tmp58 = tmp57 * tmp57 tmp59 = tmp15 - tmp56 tmp60 = tmp59 * tmp59 tmp61 = tmp58 + tmp60 tmp62 = tmp25 - tmp56 tmp63 = tmp62 * tmp62 tmp64 = tmp61 + tmp63 tmp65 = tmp35 - tmp56 tmp66 = tmp65 * tmp65 tmp67 = tmp64 + tmp66 tmp68 = tmp67 / tmp39 tl.store(out_ptr0 + x0, tmp40, xmask) tl.store(out_ptr1 + x0, tmp52, xmask) tl.store(out_ptr2 + x0, tmp56, xmask) tl.store(out_ptr3 + x0, tmp68, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x2, xmask) tmp9 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr10 + x0, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp0 * tmp7 tmp10 = tmp8 - tmp9 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp10 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp6 - tmp20 tmp23 = tmp22 + tmp12 tmp24 = libdevice.rsqrt(tmp23) 
tmp25 = tmp21 * tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (16, 4), (4, 1)) assert_size_stride(primals_13, (16,), (1,)) assert_size_stride(primals_14, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_18, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4,), (1,)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_12 del primals_13 buf7 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_16, reinterpret_tensor(primals_17, (64, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_15 del primals_16 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(256)](buf3, buf6, buf4, buf7, buf5, primals_11, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
triton_poi_fused_sigmoid_1[grid(256)](buf3, buf6, buf4, buf7, buf5, primals_11, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_sigmoid_2[grid(256)](buf3, buf6, buf4, buf7, buf5, primals_11, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_11 buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf15 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_mul_native_layer_norm_tanh_3[grid(64)](buf10, buf9, primals_18, buf8, buf11, buf12, buf14, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_tanh_4[grid(256)](buf10, buf9, primals_18, buf8, buf11, buf12, primals_19, primals_20, buf14, buf15, primals_21, primals_22, buf13, buf16, 256, XBLOCK =256, num_warps=4, num_stages=1) del buf11 del buf12 del buf14 del buf15 del primals_20 del primals_22 return (buf13, buf16, primals_18, primals_19, primals_21, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf3, buf1, buf4, buf2, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(primals_17, (64, 4), (4, 1), 0), buf7, buf8, buf9, buf10, primals_10, primals_9, primals_8) class InferenceNetLSTMCellNew(nn.Module): def __init__(self, z_dim: 'int', input_dim: 'int', hidden_hat_dim: 'int', hidden_dim: 'int'): super(InferenceNetLSTMCellNew, self).__init__() self.w_hh = nn.Linear(hidden_hat_dim, z_dim) self.w_hx = nn.Linear(hidden_hat_dim, z_dim) self.w_hb = nn.Linear(hidden_hat_dim, z_dim) self.W_hz = nn.Linear(z_dim, 4 * hidden_dim, bias=False) self.W_xz = nn.Linear(z_dim, 4 * hidden_dim, bias=False) self.b = nn.Linear(z_dim, 4 * hidden_dim) self.Wh = nn.Linear(hidden_dim, 4 * hidden_dim) self.Wx = nn.Linear(input_dim, 4 * hidden_dim) self.dropout = nn.Dropout(p=0.1, inplace=True) self.norm_h = nn.LayerNorm(hidden_dim) self.norm_c = nn.LayerNorm(hidden_dim) def forward(self, input_0, input_1, input_2, input_3): primals_1 = self.w_hh.weight primals_2 = self.w_hh.bias primals_4 = self.w_hx.weight primals_5 = self.w_hx.bias primals_6 = self.w_hb.weight primals_7 = self.w_hb.bias primals_8 = self.W_hz.weight primals_9 = self.W_xz.weight primals_10 = self.b.weight primals_11 = self.b.bias primals_12 = self.Wh.weight primals_13 = self.Wh.bias primals_15 = self.Wx.weight primals_16 = self.Wx.bias primals_19 = self.norm_h.weight primals_20 = self.norm_h.bias primals_21 = self.norm_c.weight primals_22 = self.norm_c.bias primals_3 = input_0 primals_14 = input_1 primals_17 = input_2 primals_18 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22]) return output[0], output[1]
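A minimal smoke test for the compiled wrapper above (hypothetical, not part of the dataset row). The argument names h_hat/h/x/c are illustrative labels for input_0..input_3, and the (4, 4, 4, 4) shapes come from the assert_size_stride guards inside call(); a CUDA device is required because call() pins device 0 and allocates with empty_strided_cuda.

import torch

# Hypothetical usage sketch, assuming the InferenceNetLSTMCellNew class
# defined above is importable in scope.
if torch.cuda.is_available():
    cell = InferenceNetLSTMCellNew(z_dim=4, input_dim=4, hidden_hat_dim=4,
        hidden_dim=4).cuda()
    h_hat = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.rand(4, 4, 4, 4, device='cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    c = torch.rand(4, 4, 4, 4, device='cuda')
    new_h, new_c = cell(h_hat, h, x, c)
    assert new_h.shape == new_c.shape == (4, 4, 4, 4)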
kingofpigeon/hypernlp
InferenceNetLSTMCell
false
10401
[ "MIT" ]
0
1270ae318e698775160a6299db35752823fda7c7
https://github.com/kingofpigeon/hypernlp/tree/1270ae318e698775160a6299db35752823fda7c7
MinMaxNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/32/c32i2334e2cubbxcfo3qfe3hcxrbbjwe3jyvzl2kvxsr6gdadsdu.py # Topologically Sorted Source Nodes: [sub, mul, truediv, add], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0), kwargs = {}) triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = float("inf") tmp6 = tmp4 * tmp5 tmp7 = 0.0 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, mul, truediv, add], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MinMaxNorm(nn.Module):

    def __init__(self, min, max, a=0, b=1):
        super(MinMaxNorm, self).__init__()
        self.min, self.max = min, max
        self.a, self.b = a, b

    def forward(self, x):
        return self.a + (x - self.min) * (self.b - self.a) / (self.max -
            self.min)

    def inverse(self, x):
        return self.min + (x - self.a) * (self.max - self.min) / (self.b -
            self.a)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    # Note: min == max here, so forward() divides by zero; the compiled
    # kernel below reflects this by multiplying by float('inf').
    return [[], {'min': 4, 'max': 4}]
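A quick eager-mode sanity check (hypothetical, not part of the row). The row's get_init_inputs uses min == max == 4, which makes forward() divide by zero, so this sketch uses a non-degenerate range instead to show the intended behaviour of the eager module above:

import torch

# Hypothetical sketch: with max > min, MinMaxNorm is a plain affine rescale
# from [min, max] into [a, b], and inverse() undoes it exactly.
norm = MinMaxNorm(min=0.0, max=4.0, a=0.0, b=1.0)
x = torch.rand(4, 4, 4, 4) * 4.0
y = norm(x)
assert y.min() >= 0.0 and y.max() <= 1.0
assert torch.allclose(norm.inverse(y), x, atol=1e-6)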
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 - tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = float('inf')  # 1 / (max - min) with max == min folds to inf
    tmp6 = tmp4 * tmp5
    tmp7 = 0.0
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_sub_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MinMaxNormNew(nn.Module):

    def __init__(self, min, max, a=0, b=1):
        super(MinMaxNormNew, self).__init__()
        self.min, self.max = min, max
        self.a, self.b = a, b

    def inverse(self, x):
        return self.min + (x - self.a) * (self.max - self.min) / (self.b -
            self.a)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
iclementine/speedyspeech
MinMaxNorm
false
10402
[ "BSD-3-Clause" ]
0
db527587a3699b71082d61c9e9fad7ed795d1980
https://github.com/iclementine/speedyspeech/tree/db527587a3699b71082d61c9e9fad7ed795d1980
CCAMDec
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3m/c3mxgkf4weymbmbgydi4j4i6eycdz2flzbf3jce3eapte2aqyfta.py # Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub] # Source node to ATen node mapping: # energy_new => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %bmm), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 
= tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (x2), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5e/c5e7z5qmoiqut4wygb4iv6xmv65bbiotnb64o5cgidinohzcyout.py # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # out_2 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %view_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0), xmask) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attention, out], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(primals_1, primals_3, buf4, buf5, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: 
call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch.nn import Parameter
from torch.nn import Softmax
from torch.nn.parameter import Parameter


class CCAMDec(Module):
    """
    CCAM decoding module
    """

    def __init__(self):
        super(CCAMDec, self).__init__()
        self.softmax = Softmax(dim=-1)
        self.scale = Parameter(torch.zeros(1))

    def forward(self, x, y):
        """
            inputs :
                x : input feature (N, C, H, W)
                y : gathering centers (N, K, H, W)
            returns :
                out : compact channel attention feature
                attention map: K * C
        """
        m_batchsize, C, width, height = x.size()
        x_reshape = x.view(m_batchsize, C, -1)
        B, K, _W, _H = y.size()
        y_reshape = y.view(B, K, -1)
        proj_query = x_reshape
        proj_key = y_reshape.permute(0, 2, 1)
        energy = torch.bmm(proj_query, proj_key)
        energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy
            ) - energy
        attention = self.softmax(energy_new)
        proj_value = y.view(B, K, -1)
        out = torch.bmm(attention, proj_value)
        out = out.view(m_batchsize, C, width, height)
        out = x + self.scale * out
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn import Parameter from torch.nn import Softmax from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_1, primals_3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf5, buf4 class CCAMDecNew(Module): """ CCAM decoding module """ def __init__(self): super(CCAMDecNew, self).__init__() self.softmax = Softmax(dim=-1) self.scale = Parameter(torch.zeros(1)) def forward(self, input_0, input_1): primals_3 = self.scale primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
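A hypothetical eager-mode check (not part of the row), using the CCAMDec module defined above. Because self.scale is initialized to zeros, the module is an identity mapping at initialization, which makes shape and wiring easy to verify:

import torch

# Hypothetical sketch: the residual branch is zeroed by scale at init.
dec = CCAMDec()
x = torch.rand(4, 4, 4, 4)  # (N, C, H, W) input features
y = torch.rand(4, 4, 4, 4)  # (N, K, H, W) gathering centers
out = dec(x, y)
assert out.shape == x.shape
assert torch.allclose(out, x)  # scale == 0, so out == x before training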
bfjei2825401/siamban
CCAMDec
false
10403
[ "Apache-2.0" ]
0
c41d58742b146dfc8960053453227c6e9fec1bac
https://github.com/bfjei2825401/siamban/tree/c41d58742b146dfc8960053453227c6e9fec1bac
PAM_Module
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/sr/csrg6irduolxnaubd5v3tlh5eeuhw27sxkg3o56t4veh47sq6ce3.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 2 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # Graph 
fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/j4/cj4f6qdb45emg4zrdv5vzxtw2vswpyt2rqyalr6mxgomzeyk55j5.py # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # out_2 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, %view_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2, ), (1, )) assert_size_stride(primals_4, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (2, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 128, grid=grid(128), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf3, primals_5, 128, grid=grid(128), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 2), (32, 1, 16), 0), reinterpret_tensor(buf3, (4, 2, 16), (32, 16, 1), 0), out=buf4) buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0) del buf4 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf9, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(primals_8, buf10, primals_1, buf11, 256, grid=grid(256), stream=stream0) return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, buf7, buf10, reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf1, (4, 2, 16), (32, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 2), (32, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch.nn import Conv2d
from torch.nn import Parameter
from torch.nn import Softmax
from torch.nn.parameter import Parameter


class PAM_Module(Module):
    """ Position attention module"""

    def __init__(self, in_dim):
        super(PAM_Module, self).__init__()
        self.channel_in = in_dim
        out_channels = max(in_dim // 8, min(in_dim, 2))
        self.query_conv = Conv2d(in_channels=in_dim, out_channels=
            out_channels, kernel_size=1)
        self.key_conv = Conv2d(in_channels=in_dim, out_channels=
            out_channels, kernel_size=1)
        self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
            kernel_size=1)
        self.gamma = Parameter(torch.zeros(1))
        self.softmax = Softmax(dim=-1)

    def forward(self, x):
        """
            inputs :
                x : input feature maps (B x C x H x W)
            returns :
                out : attention value + input feature
                attention : B x (H*W) x (H*W)
        """
        m_batchsize, C, height, width = x.size()
        proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
            ).permute(0, 2, 1)
        proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
        energy = torch.bmm(proj_query, proj_key)
        attention = self.softmax(energy)
        proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)
        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
        out = out.view(m_batchsize, C, height, width)
        out = self.gamma * out + x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn import Conv2d from torch.nn import Parameter from torch.nn import Softmax from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 
4), (32, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(128)](buf1, primals_3, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(128)](buf3, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 2), (32, 1, 16), 0), reinterpret_tensor(buf3, (4, 2, 16), (32, 16, 1), 0), out=buf4) buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf4 buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_2[grid(256)](buf9, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out =buf10) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_8, buf10, primals_1, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, buf7, buf10, reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf1, (4, 2, 16), (32, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 2), (32, 1, 16), 0)) class PAM_ModuleNew(Module): """ Position attention module""" def __init__(self, in_dim): super(PAM_ModuleNew, self).__init__() self.channel_in = in_dim out_channels = max(in_dim // 8, min(in_dim, 2)) self.query_conv = Conv2d(in_channels=in_dim, out_channels= out_channels, kernel_size=1) self.key_conv = Conv2d(in_channels=in_dim, out_channels= out_channels, kernel_size=1) self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, input_0): primals_8 = self.gamma primals_2 = self.query_conv.weight primals_3 = self.query_conv.bias primals_4 = self.key_conv.weight primals_5 = self.key_conv.bias primals_6 = self.value_conv.weight primals_7 = self.value_conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
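A hypothetical eager-mode check (not part of the row), using the PAM_Module class defined above. As with CCAMDec, gamma is zero-initialized, so the module acts as an identity at init; note also that out_channels = max(in_dim // 8, min(in_dim, 2)) evaluates to 2 for in_dim=4, matching the (2, 4, 1, 1) query/key conv weights in the guards:

import torch

# Hypothetical sketch: the attention branch is zeroed by gamma at init.
pam = PAM_Module(in_dim=4)
x = torch.rand(4, 4, 4, 4)  # (B, C, H, W)
out = pam(x)
assert out.shape == x.shape
assert torch.allclose(out, x)  # gamma == 0, so out == x before training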
bfjei2825401/siamban
PAM_Module
false
10,404
[ "Apache-2.0" ]
0
c41d58742b146dfc8960053453227c6e9fec1bac
https://github.com/bfjei2825401/siamban/tree/c41d58742b146dfc8960053453227c6e9fec1bac
Encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/dk/cdkfylhbmav6z7nn6vy4nwynu3ry6ilbxpjspabpseiuux7csyql.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # input_3 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_group_norm_0 = async_compile.triton('triton_poi_fused_native_group_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): 
xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp13 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp7 - tmp13 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 - tmp13 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp24 / tmp12 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tl.store(out_ptr0 + (x0), tmp13, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kd/ckdswb4lufwiij2wklkypvchblxzpif4ph4uxoqoklvcbijmlski.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # input_3 => add_1, mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_7), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_4), kwargs = {}) triton_poi_fused_native_group_norm_1 = async_compile.triton('triton_poi_fused_native_group_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool3d_with_indices] buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [2, 2, 2], [2, 2, 2]) del primals_1 buf1 = buf0[0] del buf0 # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1)) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_group_norm_0.run(buf3, buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_1.run(buf3, buf4, buf5, primals_3, primals_4, buf6, 64, grid=grid(64), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (0, 16, 4, 2, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1)) buf8 = buf5; del buf5 # reuse buf9 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_0.run(buf7, buf8, buf9, 16, grid=grid(16), stream=stream0) buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_1.run(buf7, buf8, buf9, primals_6, primals_7, buf10, 64, grid=grid(64), stream=stream0) del buf8 del buf9 del primals_7 return (buf10, primals_2, primals_3, primals_5, primals_6, reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), buf3, reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import 
rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
    return nn.Conv3d(in_channels, out_channels, kernel_size,
        padding=padding, bias=bias)


def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
        padding=1):
    """
    Create a list of modules which together constitute a single conv layer
    with non-linearity and optional batchnorm/groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
        padding (int): add zero-padding to the input

    Return:
        list of tuple (name, module)
    """
    assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
    modules = []
    for i, char in enumerate(order):
        if char == 'r':
            modules.append(('ReLU', nn.ReLU(inplace=True)))
        elif char == 'l':
            modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
                inplace=True)))
        elif char == 'e':
            modules.append(('ELU', nn.ELU(inplace=True)))
        elif char == 'c':
            bias = not ('g' in order or 'b' in order)
            modules.append(('conv', conv3d(in_channels, out_channels,
                kernel_size, bias, padding=padding)))
        elif char == 'g':
            is_before_conv = i < order.index('c')
            assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
            if out_channels < num_groups:
                num_groups = out_channels
            modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
                num_channels=out_channels)))
        elif char == 'b':
            is_before_conv = i < order.index('c')
            if is_before_conv:
                modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
            else:
                modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
            )
    return modules


class SingleConv(nn.Sequential):
    """
    Basic convolutional module consisting of a Conv3d, non-linearity and
    optional batchnorm/groupnorm. The order of operations can be specified
    via the `order` parameter

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): determines the order of layers, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order=
            'crg', num_groups=8, padding=1):
        super(SingleConv, self).__init__()
        for name, module in create_conv(in_channels, out_channels,
                kernel_size, order, num_groups, padding=padding):
            self.add_module(name, module)


class DoubleConv(nn.Sequential):
    """
    A module consisting of two consecutive convolution layers
    (e.g. BatchNorm3d+ReLU+Conv3d). We use (Conv3d+ReLU+GroupNorm3d) by
    default. This can be changed however by providing the 'order' argument,
    e.g. in order to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
    Use padded convolutions to make sure that the output (H_out, W_out) is
    the same as (H_in, W_in), so that you don't have to crop in the decoder
    path.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        encoder (bool): if True we're in the encoder path, otherwise we're
            in the decoder
        kernel_size (int): size of the convolving kernel
        order (string): determines the order of layers, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(self, in_channels, out_channels, encoder, kernel_size=3,
            order='crg', num_groups=8):
        super(DoubleConv, self).__init__()
        if encoder:
            conv1_in_channels = in_channels
            conv1_out_channels = out_channels // 2
            if conv1_out_channels < in_channels:
                conv1_out_channels = in_channels
            conv2_in_channels, conv2_out_channels = (conv1_out_channels,
                out_channels)
        else:
            conv1_in_channels, conv1_out_channels = in_channels, out_channels
            conv2_in_channels, conv2_out_channels = out_channels, out_channels
        self.add_module('SingleConv1', SingleConv(conv1_in_channels,
            conv1_out_channels, kernel_size, order, num_groups))
        self.add_module('SingleConv2', SingleConv(conv2_in_channels,
            conv2_out_channels, kernel_size, order, num_groups))


class Encoder(nn.Module):
    """
    A single module from the encoder path consisting of the optional max
    pooling layer (one may specify the MaxPool kernel_size to be different
    than the standard (2,2,2), e.g. if the volumetric data is anisotropic
    (make sure to use complementary scale_factor in the decoder path))
    followed by a DoubleConv module.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        conv_kernel_size (int): size of the convolving kernel
        apply_pooling (bool): if True use MaxPool3d before DoubleConv
        pool_kernel_size (tuple): the size of the window to take a max over
        pool_type (str): pooling layer: 'max' or 'avg'
        basic_module(nn.Module): either ResNetBlock or DoubleConv
        conv_layer_order (string): determines the order of layers in
            `DoubleConv` module. See `DoubleConv` for more info.
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(self, in_channels, out_channels, conv_kernel_size=3,
            apply_pooling=True, pool_kernel_size=(2, 2, 2), pool_type='max',
            basic_module=DoubleConv, conv_layer_order='crg', num_groups=8):
        super(Encoder, self).__init__()
        assert pool_type in ['max', 'avg']
        if apply_pooling:
            if pool_type == 'max':
                self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
            else:
                self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
        else:
            self.pooling = None
        self.basic_module = basic_module(in_channels, out_channels,
            encoder=True, kernel_size=conv_kernel_size,
            order=conv_layer_order, num_groups=num_groups)

    def forward(self, x):
        if self.pooling is not None:
            x = self.pooling(x)
        x = self.basic_module(x)
        return x


def get_inputs():
    return [torch.rand([4, 8, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
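A short usage sketch of the order-string mechanism documented above (shapes and names come straight from this record):

import torch

# 'crg' -> Conv3d + ReLU + GroupNorm, appended in exactly that order
block = SingleConv(in_channels=4, out_channels=8, order='crg', num_groups=8)
print([name for name, _ in block.named_children()])
# ['conv', 'ReLU', 'groupnorm']

# Encoder halves each spatial dim via MaxPool3d(2) before the DoubleConv
enc = Encoder(in_channels=4, out_channels=8)
x = torch.rand(1, 4, 8, 8, 8)    # (N, C, D, H, W)
print(enc(x).shape)              # torch.Size([1, 8, 4, 4, 4])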
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp1, tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = triton_helpers.maximum(tmp1, tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = triton_helpers.maximum(tmp1, tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tmp4 - tmp13
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp7 - tmp13
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp10 - tmp13
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp24 / tmp12
    tmp26 = 1e-05
    tmp27 = tmp25 + tmp26
    tmp28 = libdevice.rsqrt(tmp27)
    tl.store(out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
        in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex // 4
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = tmp2 - tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 * tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 8, 4, 4), (128, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1,
            [2, 2, 2], [2, 2, 2])
        del primals_1
        buf1 = buf0[0]
        del buf0
        buf3 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4,
            4, 2, 2), (64, 16, 4, 2, 1), 0), primals_2, stride=(1, 1, 1),
            padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
        buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_group_norm_0[grid(16)](buf3, buf4, buf5,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_native_group_norm_1[grid(64)](buf3, buf4, buf5,
            primals_3, primals_4, buf6, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_4
        buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4,
            4, 2, 2), (0, 16, 4, 2, 1), 0), primals_5, stride=(1, 1, 1),
            padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
        buf8 = buf5
        del buf5
        buf9 = buf4
        del buf4
        triton_poi_fused_native_group_norm_0[grid(16)](buf7, buf8, buf9,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_native_group_norm_1[grid(64)](buf7, buf8, buf9,
            primals_6, primals_7, buf10, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf8
        del buf9
        del primals_7
    return (buf10, primals_2, primals_3, primals_5, primals_6,
        reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0),
        buf3, reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1),
        0), buf7)


def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
    return nn.Conv3d(in_channels, out_channels, kernel_size,
        padding=padding, bias=bias)


def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
        padding=1):
    """
    Create a list of modules which together constitute a single conv layer
    with non-linearity and optional batchnorm/groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
        padding (int): add zero-padding to the input

    Return:
        list of tuple (name, module)
    """
    assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
    modules = []
    for i, char in enumerate(order):
        if char == 'r':
            modules.append(('ReLU', nn.ReLU(inplace=True)))
        elif char == 'l':
            modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
                inplace=True)))
        elif char == 'e':
            modules.append(('ELU', nn.ELU(inplace=True)))
        elif char == 'c':
            bias = not ('g' in order or 'b' in order)
            modules.append(('conv', conv3d(in_channels, out_channels,
                kernel_size, bias, padding=padding)))
        elif char == 'g':
            is_before_conv = i < order.index('c')
            assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
            if out_channels < num_groups:
                num_groups = out_channels
            modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
                num_channels=out_channels)))
        elif char == 'b':
            is_before_conv = i < order.index('c')
            if is_before_conv:
                modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
            else:
                modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
            )
    return modules


class SingleConv(nn.Sequential):
    """
    Basic convolutional module consisting of a Conv3d, non-linearity and
    optional batchnorm/groupnorm. The order of operations can be specified
    via the `order` parameter

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): determines the order of layers, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order=
            'crg', num_groups=8, padding=1):
        super(SingleConv, self).__init__()
        for name, module in create_conv(in_channels, out_channels,
                kernel_size, order, num_groups, padding=padding):
            self.add_module(name, module)


class DoubleConv(nn.Sequential):
    """
    A module consisting of two consecutive convolution layers
    (e.g. BatchNorm3d+ReLU+Conv3d). We use (Conv3d+ReLU+GroupNorm3d) by
    default. This can be changed however by providing the 'order' argument,
    e.g. in order to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
    Use padded convolutions to make sure that the output (H_out, W_out) is
    the same as (H_in, W_in), so that you don't have to crop in the decoder
    path.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        encoder (bool): if True we're in the encoder path, otherwise we're
            in the decoder
        kernel_size (int): size of the convolving kernel
        order (string): determines the order of layers, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(self, in_channels, out_channels, encoder, kernel_size=3,
            order='crg', num_groups=8):
        super(DoubleConv, self).__init__()
        if encoder:
            conv1_in_channels = in_channels
            conv1_out_channels = out_channels // 2
            if conv1_out_channels < in_channels:
                conv1_out_channels = in_channels
            conv2_in_channels, conv2_out_channels = (conv1_out_channels,
                out_channels)
        else:
            conv1_in_channels, conv1_out_channels = in_channels, out_channels
            conv2_in_channels, conv2_out_channels = out_channels, out_channels
        self.add_module('SingleConv1', SingleConv(conv1_in_channels,
            conv1_out_channels, kernel_size, order, num_groups))
        self.add_module('SingleConv2', SingleConv(conv2_in_channels,
            conv2_out_channels, kernel_size, order, num_groups))


class EncoderNew(nn.Module):
    """
    A single module from the encoder path consisting of the optional max
    pooling layer (one may specify the MaxPool kernel_size to be different
    than the standard (2,2,2), e.g. if the volumetric data is anisotropic
    (make sure to use complementary scale_factor in the decoder path))
    followed by a DoubleConv module.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        conv_kernel_size (int): size of the convolving kernel
        apply_pooling (bool): if True use MaxPool3d before DoubleConv
        pool_kernel_size (tuple): the size of the window to take a max over
        pool_type (str): pooling layer: 'max' or 'avg'
        basic_module(nn.Module): either ResNetBlock or DoubleConv
        conv_layer_order (string): determines the order of layers in
            `DoubleConv` module. See `DoubleConv` for more info.
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(self, in_channels, out_channels, conv_kernel_size=3,
            apply_pooling=True, pool_kernel_size=(2, 2, 2), pool_type='max',
            basic_module=DoubleConv, conv_layer_order='crg', num_groups=8):
        super(EncoderNew, self).__init__()
        assert pool_type in ['max', 'avg']
        if apply_pooling:
            if pool_type == 'max':
                self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
            else:
                self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
        else:
            self.pooling = None
        self.basic_module = basic_module(in_channels, out_channels,
            encoder=True, kernel_size=conv_kernel_size,
            order=conv_layer_order, num_groups=num_groups)

    def forward(self, input_0):
        primals_2 = self.basic_module.SingleConv1.conv.weight
        primals_3 = self.basic_module.SingleConv1.groupnorm.weight
        primals_4 = self.basic_module.SingleConv1.groupnorm.bias
        primals_5 = self.basic_module.SingleConv2.conv.weight
        primals_6 = self.basic_module.SingleConv2.groupnorm.weight
        primals_7 = self.basic_module.SingleConv2.groupnorm.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
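A hedged equivalence sketch between the eager Encoder from the previous listing and the compiled EncoderNew (not part of the original record). The wrapper was specialized for the exact (4, 8, 4, 4) input from get_inputs(), which the eager ops treat as an unbatched (C, D, H, W) volume, so only that shape is comparable; it also assumes a CUDA device, a PyTorch version that accepts unbatched inputs to MaxPool3d/Conv3d, and that both classes are importable together.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Encoder(in_channels=4, out_channels=4).cuda()
    compiled = EncoderNew(in_channels=4, out_channels=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # same parameter tree
    x = torch.rand(4, 8, 4, 4, device='cuda')
    # both paths: max-pool -> (conv + ReLU + groupnorm) x 2
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)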
joowlim/pytorch-3dunet
Encoder
false
10,405
[ "MIT" ]
0
d08049f60b619627521efd0fb171247e1536b262
https://github.com/joowlim/pytorch-3dunet/tree/d08049f60b619627521efd0fb171247e1536b262
StandardNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yi/cyiakmznzllqklcnfmh5hu3smacswbrh4drlxafkndcx6god674a.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.25 tmp4 = tmp2 * tmp3 
tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class StandardNorm(nn.Module):

    def __init__(self, mean, std):
        super(StandardNorm, self).__init__()
        self.mean = mean
        self.std = std

    def forward(self, x):
        return (x - self.mean) / self.std

    def inverse(self, x):
        return x * self.std + self.mean


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'mean': 4, 'std': 4}]
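A worked example of this record's init arguments (mean=4, std=4, from get_init_inputs): forward computes (x - 4) / 4 and inverse undoes it exactly.

import torch

norm = StandardNorm(mean=4, std=4)
x = torch.rand(4, 4, 4, 4)
z = norm(x)                      # e.g. x == 4.0 maps to z == 0.0
assert torch.allclose(norm.inverse(z), x)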
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 - tmp1
    tmp3 = 0.25
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class StandardNormNew(nn.Module):

    def __init__(self, mean, std):
        super(StandardNormNew, self).__init__()
        self.mean = mean
        self.std = std

    def inverse(self, x):
        return x * self.std + self.mean

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
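Note that Inductor constant-folds the attributes into the kernel: the subtraction uses the literal 4.0 and the division by std becomes a multiply by 0.25, so this compiled module is only valid for mean=4, std=4. A quick check (a sketch; assumes a CUDA device):

import torch

if torch.cuda.is_available():
    m = StandardNormNew(mean=4, std=4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), (x - 4) / 4)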
iclementine/speedyspeech
StandardNorm
false
10,406
[ "BSD-3-Clause" ]
0
db527587a3699b71082d61c9e9fad7ed795d1980
https://github.com/iclementine/speedyspeech/tree/db527587a3699b71082d61c9e9fad7ed795d1980
EuclideanComparator_1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/kf/ckf5m3mk5oxwljoolseccy4i3hna4r2zdcsgb4fkgn7aaawvw5zq.py # Topologically Sorted Source Nodes: [dist, truediv], Original ATen: [aten.dist, aten.div] # Source node to ATen node mapping: # dist => pow_1, pow_2, sub, sum_1 # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, 4), kwargs = {}) triton_per_fused_dist_div_0 = async_compile.triton('triton_per_fused_dist_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dist_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dist_div_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = libdevice.sqrt(tmp6) tmp8 = 0.25 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [dist, truediv], Original ATen: [aten.dist, aten.div] stream0 = get_raw_stream(0) triton_per_fused_dist_div_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import logging
import torch
import yaml
from dataclasses import dataclass
from collections import defaultdict
import torch.optim
from torch import nn

# `yaml` and a module-level logger were referenced but never imported in the
# extracted snippet; PyYAML and a stdlib logger stand in here. The snippet
# also refers to the source repo's own `irtm` package (for irtm.IRTMError),
# whose import was elided in this extract and is left as-is.
log = logging.getLogger(__name__)


class Base(nn.Module):
    registered = defaultdict(dict)

    @dataclass
    class Config:
        pass

    @property
    def config(self):
        return self._config

    def __init__(self, *args, config: Config=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._config = config

    def __str__(self) ->str:
        return type(self).__name__

    @classmethod
    def module(Child, Impl):
        try:
            Impl.name
        except AttributeError:
            msg = f'Class {Impl} has no attribute .name'
            raise irtm.IRTMError(msg)
        Base.registered[Child.__name__][Impl.name] = Impl
        return Impl

    @classmethod
    def init(Child, *, name: str=None, **kwargs):
        try:
            if name is None:
                name = 'noop'
            A = Base.registered[Child.__name__][name]
        except KeyError:
            dicrep = yaml.dump(Base.registered, default_flow_style=False)
            msg = (
                f'could not find module "{name}"\n\navailable modules:\n{dicrep}'
            )
            raise irtm.IRTMError(msg)
        config = A.Config(**kwargs)
        log.info(f'! initializing {A.__name__} with {config}')
        return A(config=config)


class Comparator(Base):
    pass


@Comparator.module
class EuclideanComparator_1(Comparator):
    name = 'euclidean 1'

    def forward(self, X, Y):
        return torch.dist(X, Y, p=2) / X.shape[0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
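The forward above is the flattened Euclidean distance normalized by the leading dimension; a worked check (a sketch using only names from this record):

import torch

X, Y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
cmp = EuclideanComparator_1()
d = cmp(X, Y)
# torch.dist flattens both tensors: sqrt of the sum of all 256 squared
# differences, then divide by X.shape[0] == 4
assert torch.allclose(d, torch.sqrt(((X - Y) ** 2).sum()) / 4)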
import logging
import torch
import triton
import triton.language as tl
import yaml
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from dataclasses import dataclass
from collections import defaultdict
import torch.optim
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

# As in the eager listing above: `yaml` and a logger stand in for imports
# elided in the extract; `irtm` (the source repo's package) is left as-is.
log = logging.getLogger(__name__)


@triton.jit
def triton_per_fused_dist_div_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = libdevice.sqrt(tmp6)
    tmp8 = 0.25
    tmp9 = tmp7 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_dist_div_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256,
            num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class Base(nn.Module):
    registered = defaultdict(dict)

    @dataclass
    class Config:
        pass

    @property
    def config(self):
        return self._config

    def __init__(self, *args, config: Config=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._config = config

    def __str__(self) ->str:
        return type(self).__name__

    @classmethod
    def module(Child, Impl):
        try:
            Impl.name
        except AttributeError:
            msg = f'Class {Impl} has no attribute .name'
            raise irtm.IRTMError(msg)
        Base.registered[Child.__name__][Impl.name] = Impl
        return Impl

    @classmethod
    def init(Child, *, name: str=None, **kwargs):
        try:
            if name is None:
                name = 'noop'
            A = Base.registered[Child.__name__][name]
        except KeyError:
            dicrep = yaml.dump(Base.registered, default_flow_style=False)
            msg = (
                f'could not find module "{name}"\n\navailable modules:\n{dicrep}'
            )
            raise irtm.IRTMError(msg)
        config = A.Config(**kwargs)
        log.info(f'! initializing {A.__name__} with {config}')
        return A(config=config)


class Comparator(Base):
    pass


@Comparator.module
class EuclideanComparator_1New(Comparator):
    name = 'euclidean 1'

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
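A corresponding smoke test for the compiled version (a sketch; assumes a CUDA device): the persistent-reduction kernel fuses the squared-difference sum, the sqrt, and the divide-by-4 (folded to a multiply by 0.25) into a single launch.

import torch

if torch.cuda.is_available():
    X = torch.rand(4, 4, 4, 4, device='cuda')
    Y = torch.rand(4, 4, 4, 4, device='cuda')
    d = EuclideanComparator_1New()(X, Y)
    assert torch.allclose(d, torch.dist(X, Y, p=2) / 4)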
lavis-nlp/irtm
EuclideanComparator_1
false
10,407
[ "MIT" ]
0
e6c96519918795cfaa0c09ef2d4164f451265518
https://github.com/lavis-nlp/irtm/tree/e6c96519918795cfaa0c09ef2d4164f451265518
AffineConstantFlow
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/n3/cn3wlek7bl5jirlp73pieiarvuadnzcb6uy5z3ieztq35hnq6trv.py # Topologically Sorted Source Nodes: [exp, mul, z], Original ATen: [aten.exp, aten.mul, aten.add] # Source node to ATen node mapping: # exp => exp # mul => mul # z => add # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_exp_mul_0 = async_compile.triton('triton_poi_fused_add_exp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zj/czjrqmkyp2ezurdzw3bheq5i5tw7cwlzcc36wzadkzk7h5or6ugs.py # Topologically Sorted Source Nodes: [log_det, repeat], Original ATen: [aten.sum, aten.repeat] # Source node to ATen node mapping: # log_det => sum_1 # repeat => repeat # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [1]), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%sum_1, [4, 1]), kwargs = {}) triton_per_fused_repeat_sum_1 = async_compile.triton('triton_per_fused_repeat_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_repeat_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_repeat_sum_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp, mul, z], Original ATen: [aten.exp, 
aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_exp_mul_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [log_det, repeat], Original ATen: [aten.sum, aten.repeat] triton_per_fused_repeat_sum_1.run(primals_1, buf2, 1, 4, grid=grid(1), stream=stream0) return (buf0, buf2, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
from torch import nn


class FlowBlock(nn.Module):
    """
    Abstract base class for any flow blocks.
    """

    def __init__(self, dimension):
        super(FlowBlock, self).__init__()
        self.dimension = dimension

    def forward(self, x: 'Tensor') ->(Tensor, Tensor):
        """
        When implemented, forward method will represent z = f(x) and log |det f'(x)/dx|
        x: (*, dimension), z: (*, dimension) and log_det: (*, 1)
        """
        raise NotImplementedError('Forward not implemented')

    def inverse(self, z: 'Tensor') ->(Tensor, Tensor):
        """
        When implemented, inverse method will represent x = f^-(z) and log |det f^-'(z)/dz|
        z: (*, dimension), x: (*, dimension) and log_det: (*, 1)
        """
        raise NotImplementedError('Inverse not implemented')


class AffineConstantFlow(FlowBlock):
    """
    Scales + Shifts the flow by (learned) constants per dimension.
    In NICE paper there is a Scaling layer which is a special case of this
    where t is None
    """

    def __init__(self, dimension, scale=True, shift=True):
        super().__init__(dimension)
        zeros = torch.zeros(size=(1, dimension))
        self.s = nn.Parameter(torch.randn(1, dimension, requires_grad=True)
            ) if scale else zeros
        self.t = nn.Parameter(torch.randn(1, dimension, requires_grad=True)
            ) if shift else zeros

    def forward(self, x) ->(Tensor, Tensor):
        z = x * torch.exp(self.s) + self.t
        log_det = torch.sum(self.s, dim=1)
        return z, log_det.repeat(x.shape[0], 1)

    def inverse(self, z) ->(Tensor, Tensor):
        x = (z - self.t) * torch.exp(-self.s)
        log_det = torch.sum(-self.s, dim=1)
        return x, log_det.repeat(z.shape[0], 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4}]
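A round-trip sketch of the flow contract described in FlowBlock's docstrings, using only names defined above:

import torch

torch.manual_seed(0)
flow = AffineConstantFlow(dimension=4)
x = torch.rand(4, 4)
z, log_det = flow(x)                  # z = x * exp(s) + t
x_rec, inv_log_det = flow.inverse(z)  # x = (z - t) * exp(-s)
assert torch.allclose(x_rec, x, atol=1e-6)
# forward/inverse log-determinants are exact negatives, one row per sample
assert torch.allclose(log_det, -inv_log_det)
print(log_det.shape)  # torch.Size([4, 1])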
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import Tensor
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp3 = tmp0 * tmp2
    tmp5 = tmp3 + tmp4
    tl.store(out_ptr0 + x2, tmp5, xmask)


@triton.jit
def triton_per_fused_repeat_sum_1(in_ptr0, out_ptr1, xnumel, rnumel,
        XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp3, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_exp_mul_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        triton_per_fused_repeat_sum_1[grid(1)](primals_1, buf2, 1, 4,
            XBLOCK=1, num_warps=2, num_stages=1)
    return buf0, buf2, primals_1, primals_2


class FlowBlock(nn.Module):
    """
    Abstract base class for any flow blocks.
    """

    def __init__(self, dimension):
        super(FlowBlock, self).__init__()
        self.dimension = dimension

    def forward(self, x: 'Tensor') ->(Tensor, Tensor):
        """
        When implemented, forward method will represent z = f(x) and log |det f'(x)/dx|
        x: (*, dimension), z: (*, dimension) and log_det: (*, 1)
        """
        raise NotImplementedError('Forward not implemented')

    def inverse(self, z: 'Tensor') ->(Tensor, Tensor):
        """
        When implemented, inverse method will represent x = f^-(z) and log |det f^-'(z)/dz|
        z: (*, dimension), x: (*, dimension) and log_det: (*, 1)
        """
        raise NotImplementedError('Inverse not implemented')


class AffineConstantFlowNew(FlowBlock):
    """
    Scales + Shifts the flow by (learned) constants per dimension.
    In NICE paper there is a Scaling layer which is a special case of this
    where t is None
    """

    def __init__(self, dimension, scale=True, shift=True):
        super().__init__(dimension)
        zeros = torch.zeros(size=(1, dimension))
        self.s = nn.Parameter(torch.randn(1, dimension, requires_grad=True)
            ) if scale else zeros
        self.t = nn.Parameter(torch.randn(1, dimension, requires_grad=True)
            ) if shift else zeros

    def inverse(self, z) ->(Tensor, Tensor):
        x = (z - self.t) * torch.exp(-self.s)
        log_det = torch.sum(-self.s, dim=1)
        return x, log_det.repeat(z.shape[0], 1)

    def forward(self, input_0):
        primals_1 = self.s
        primals_3 = self.t
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
lleonart1984/generative_modeling
AffineConstantFlow
false
10,408
[ "MIT" ]
0
d47c53d34b9eb704b6e8b2c334262b53fe7f4f32
https://github.com/lleonart1984/generative_modeling/tree/d47c53d34b9eb704b6e8b2c334262b53fe7f4f32
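A quick eager-mode sanity check of the AffineConstantFlow record above (a minimal sketch in plain PyTorch, CPU only; dim, s, t and the input batch are made-up stand-ins): the forward map z = x * exp(s) + t and its inverse must round-trip exactly, and because the Jacobian is diag(exp(s)), the forward log|det| is simply sum(s).

import torch

torch.manual_seed(0)
dim = 4
s = torch.randn(1, dim)   # per-dimension log-scale (stand-in for self.s)
t = torch.randn(1, dim)   # per-dimension shift (stand-in for self.t)
x = torch.rand(8, dim)

z = x * torch.exp(s) + t             # forward transform
log_det = s.sum(dim=1)               # log|det diag(exp(s))| = sum(s)

x_back = (z - t) * torch.exp(-s)     # inverse transform
assert torch.allclose(x, x_back, atol=1e-6)
assert torch.allclose((-s).sum(dim=1), -log_det)  # inverse log-det is the negation
print('round-trip ok, log|det| =', float(log_det))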
MaxPoolingAggregator_1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2l/c2lm5wvy5varadxpp77k6lvi6yjwzernwi4uqg6gmabg2nygeeur.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = 
triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from dataclasses import dataclass
from collections import defaultdict
import torch.optim
from torch import nn
import logging
import yaml
import irtm  # supplies IRTMError in the original repo

log = logging.getLogger(__name__)


class Base(nn.Module):
    registered = defaultdict(dict)

    @dataclass
    class Config:
        pass

    @property
    def config(self):
        return self._config

    def __init__(self, *args, config: Config=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._config = config

    def __str__(self) ->str:
        return type(self).__name__

    @classmethod
    def module(Child, Impl):
        try:
            Impl.name
        except AttributeError:
            msg = f'Class {Impl} has no attribute .name'
            raise irtm.IRTMError(msg)
        Base.registered[Child.__name__][Impl.name] = Impl
        return Impl

    @classmethod
    def init(Child, *, name: str=None, **kwargs):
        try:
            if name is None:
                name = 'noop'
            A = Base.registered[Child.__name__][name]
        except KeyError:
            dicrep = yaml.dump(Base.registered, default_flow_style=False)
            msg = (
                f'could not find module "{name}"\n\navailable modules:\n{dicrep}'
                )
            raise irtm.IRTMError(msg)
        config = A.Config(**kwargs)
        log.info(f'! initializing {A.__name__} with {config}')
        return A(config=config)


class Aggregator(Base):
    pass


@Aggregator.module
class MaxPoolingAggregator_1(Aggregator):
    name = 'max 1'

    def forward(self, X: 'torch.Tensor') ->torch.Tensor:
        return X.max(axis=1).values


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from dataclasses import dataclass
from collections import defaultdict
import torch.optim
from torch import nn
import logging
import yaml
import irtm  # supplies IRTMError in the original repo

log = logging.getLogger(__name__)

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class Base(nn.Module):
    registered = defaultdict(dict)

    @dataclass
    class Config:
        pass

    @property
    def config(self):
        return self._config

    def __init__(self, *args, config: Config=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._config = config

    def __str__(self) ->str:
        return type(self).__name__

    @classmethod
    def module(Child, Impl):
        try:
            Impl.name
        except AttributeError:
            msg = f'Class {Impl} has no attribute .name'
            raise irtm.IRTMError(msg)
        Base.registered[Child.__name__][Impl.name] = Impl
        return Impl

    @classmethod
    def init(Child, *, name: str=None, **kwargs):
        try:
            if name is None:
                name = 'noop'
            A = Base.registered[Child.__name__][name]
        except KeyError:
            dicrep = yaml.dump(Base.registered, default_flow_style=False)
            msg = (
                f'could not find module "{name}"\n\navailable modules:\n{dicrep}'
                )
            raise irtm.IRTMError(msg)
        config = A.Config(**kwargs)
        log.info(f'! initializing {A.__name__} with {config}')
        return A(config=config)


class Aggregator(Base):
    pass


@Aggregator.module
class MaxPoolingAggregator_1New(Aggregator):
    name = 'max 1'

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
lavis-nlp/irtm
MaxPoolingAggregator_1
false
10,409
[ "MIT" ]
0
e6c96519918795cfaa0c09ef2d4164f451265518
https://github.com/lavis-nlp/irtm/tree/e6c96519918795cfaa0c09ef2d4164f451265518
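For reference, the fused Triton kernel in the MaxPoolingAggregator_1 record reduces dim 1 with a chain of pairwise maxima; the eager-mode sketch below (no CUDA required, toy shape from get_inputs()) reproduces the same left-folded reduction and checks it against X.max(dim=1).values.

import torch

x = torch.rand(4, 4, 4, 4)
# Left fold of torch.maximum over the four dim-1 slices, mirroring the
# tmp2 / tmp4 / tmp6 chain of triton_helpers.maximum in the kernel.
m = torch.maximum(torch.maximum(torch.maximum(x[:, 0], x[:, 1]), x[:, 2]), x[:, 3])
assert torch.equal(m, x.max(dim=1).values)
print(m.shape)  # torch.Size([4, 4, 4])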
CPAMDec
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) 
tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/j4/cj4f6qdb45emg4zrdv5vzxtw2vswpyt2rqyalr6mxgomzeyk55j5.py # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # out_2 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %view_6), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf3, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_7 del primals_8 buf8 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (4, 4, 16), (64, 1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(primals_9, buf8, primals_1, buf9, 256, grid=grid(256), stream=stream0) return (buf9, primals_1, primals_3, primals_9, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, buf8, reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Conv2d from torch.nn import Parameter from torch.nn import Softmax from torch.nn import Linear from torch.nn.parameter import Parameter class CPAMDec(Module): """ CPAM decoding module """ def __init__(self, in_channels): super(CPAMDec, self).__init__() self.softmax = Softmax(dim=-1) self.scale = Parameter(torch.zeros(1)) self.conv_query = Conv2d(in_channels=in_channels, out_channels= in_channels // 4, kernel_size=1) self.conv_key = Linear(in_channels, in_channels // 4) self.conv_value = Linear(in_channels, in_channels) def forward(self, x, y): """ inputs : x : input feature(N,C,H,W) y:gathering centers(N,K,M) returns : out : compact position attention feature attention map: (H*W)*M """ m_batchsize, C, width, height = x.size() m_batchsize, K, _M = y.size() proj_query = self.conv_query(x).view(m_batchsize, -1, width * height ).permute(0, 2, 1) proj_key = self.conv_key(y).view(m_batchsize, K, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) attention = self.softmax(energy) proj_value = self.conv_value(y).permute(0, 2, 1) out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = self.scale * out + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn import Conv2d from torch.nn import Parameter from torch.nn import Softmax from torch.nn import Linear from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) 
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0
            ), alpha=1, beta=1, out=buf2)
        del primals_5
        del primals_6
        buf3 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf3, primals_4, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
        buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 0),
            0), reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf7)
        del primals_7
        del primals_8
        buf8 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0)
        del buf5
        extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4),
            0), reinterpret_tensor(buf6, (4, 4, 16), (64, 1, 4), 0), out=buf8)
        buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_3[grid(256)](primals_9, buf8, primals_1,
            buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
    return buf9, primals_1, primals_3, primals_9, reinterpret_tensor(primals_2,
        (16, 4), (4, 1), 0), buf6, buf8, reinterpret_tensor(buf7, (4, 4, 4),
        (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 16, 1), 0
        ), reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)


class CPAMDecNew(Module):
    """
    CPAM decoding module
    """

    def __init__(self, in_channels):
        super(CPAMDecNew, self).__init__()
        self.softmax = Softmax(dim=-1)
        self.scale = Parameter(torch.zeros(1))
        self.conv_query = Conv2d(in_channels=in_channels, out_channels=
            in_channels // 4, kernel_size=1)
        self.conv_key = Linear(in_channels, in_channels // 4)
        self.conv_value = Linear(in_channels, in_channels)

    def forward(self, input_0, input_1):
        # Map parameters to the primals slots the way call() consumes them:
        # primals_4 is fused into the query convolution as its bias,
        # primals_6 is the addmm bias of the key projection, and
        # primals_9 multiplies the attention output (the residual scale).
        primals_3 = self.conv_query.weight
        primals_4 = self.conv_query.bias
        primals_5 = self.conv_key.weight
        primals_6 = self.conv_key.bias
        primals_7 = self.conv_value.weight
        primals_8 = self.conv_value.bias
        primals_9 = self.scale
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
bfjei2825401/siamban
CPAMDec
false
10,410
[ "Apache-2.0" ]
0
c41d58742b146dfc8960053453227c6e9fec1bac
https://github.com/bfjei2825401/siamban/tree/c41d58742b146dfc8960053453227c6e9fec1bac
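The compiled call() in the CPAMDec record is equivalent to the following eager-mode sketch (toy shapes from get_inputs(); the weights are random stand-ins and, for brevity, the conv/linear biases are dropped, whereas the real module carries them):

import torch
import torch.nn.functional as F

N, C, H, W, K = 4, 4, 4, 4, 4
x = torch.rand(N, C, H, W)            # input feature map
y = torch.rand(N, K, C)               # gathering centers (M == C in the toy inputs)
wq = torch.randn(C // 4, C, 1, 1)     # 1x1 query-conv weight (stand-in)
wk = torch.randn(C // 4, C)           # key linear weight (stand-in)
wv = torch.randn(C, C)                # value linear weight (stand-in)
scale = torch.zeros(1)                # learned residual scale, zero at init

q = F.conv2d(x, wq).view(N, -1, H * W).permute(0, 2, 1)   # (N, HW, C/4)
k = (y @ wk.t()).permute(0, 2, 1)                          # (N, C/4, K)
attn = torch.softmax(torch.bmm(q, k), dim=-1)              # (N, HW, K)
v = (y @ wv.t()).permute(0, 2, 1)                          # (N, C, K)
out = torch.bmm(v, attn.permute(0, 2, 1)).view(N, C, H, W)
out = scale * out + x                                      # scaled residual
print(out.shape)  # torch.Size([4, 4, 4, 4])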
LearnedPositionalEncoding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/up/cuphmudfbm5xmuymbvibhn5q6e5y55prj6knqkmj43mx7ovcvcgz.py # Topologically Sorted Source Nodes: [pos], Original ATen: [aten.arange] # Source node to ATen node mapping: # pos => iota # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) triton_poi_fused_arange_0 = async_compile.triton('triton_poi_fused_arange_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7g/c7ggkspjiryu6lrnyqmt3ltk6cvy3xv7hlniyify4whtbxpu3vcz.py # Topologically Sorted 
Source Nodes: [embedding], Original ATen: [aten.embedding] # Source node to ATen node mapping: # embedding => embedding # Graph fragment: # %embedding : [num_users=2] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %expand), kwargs = {}) triton_poi_fused_embedding_1 = async_compile.triton('triton_poi_fused_embedding_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 100, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 100)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 100") tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y3/cy35gsxm3xa2mi4ahfwlz6f5exunl2ig4pka6onlgazxeb4uuhe3.py # Topologically Sorted Source Nodes: [x, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # pow_1 => pow_1 # s => mean_1 # sub => sub # u => mean # x => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) triton_poi_fused_add_mean_pow_sub_2 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_pow_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hy/chysp3ia3h42md5xyhzqsgskl34zzc6iy7yp2pqz3zla4xgfsa47.py # Topologically Sorted Source Nodes: [x, u, sub, add_1, sqrt, x_1, mul, add_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # mul => mul # sqrt => sqrt # sub => sub # u => mean # x => add # x_1 => div # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 
1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_4), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_3 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (100, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [pos], Original ATen: [aten.arange] stream0 = get_raw_stream(0) 
triton_poi_fused_arange_0.run(buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding] triton_poi_fused_embedding_1.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [x, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] triton_poi_fused_add_mean_pow_sub_2.run(primals_1, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, u, sub, add_1, sqrt, x_1, mul, add_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_sqrt_sub_3.run(primals_3, primals_1, buf1, buf2, buf3, primals_4, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_4 return (buf4, primals_1, primals_3, reinterpret_tensor(buf0, (4, 1), (1, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class LayerNorm(nn.Module): """A layernorm module in the TF style (epsilon inside the square root).""" def __init__(self, d_model, variance_epsilon=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class LearnedPositionalEncoding(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=100): super(LearnedPositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) self.pos_embed = nn.Embedding(max_len, d_model) self.layernorm = LayerNorm(d_model) def forward(self, x): seq_len = x.size(0) pos = torch.arange(seq_len, dtype=torch.long, device=x.device) pos = pos.unsqueeze(-1).expand(x.size()[:2]) x = x + self.pos_embed(pos) return self.dropout(self.layernorm(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 100, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 100) | ~xmask, 'index out of bounds: 0 <= tmp4 < 100') tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_add_mean_pow_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 
= tmp11 + tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (100, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_embedding_1[grid(64)](buf0, primals_2, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_mean_pow_sub_2[grid(64)](primals_1, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_3[grid(256)](primals_3, primals_1, buf1, buf2, buf3, primals_4, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_4 return buf4, primals_1, primals_3, reinterpret_tensor(buf0, (4, 1), (1, 1), 0), buf1 class LayerNorm(nn.Module): """A layernorm module in the TF style (epsilon inside the square root).""" def __init__(self, d_model, variance_epsilon=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class LearnedPositionalEncodingNew(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=100): super(LearnedPositionalEncodingNew, self).__init__() self.dropout = nn.Dropout(p=dropout) self.pos_embed = nn.Embedding(max_len, d_model) self.layernorm = LayerNorm(d_model) def forward(self, input_0): primals_2 = self.pos_embed.weight primals_3 = self.layernorm.gamma primals_4 = self.layernorm.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
longnsl1998/vietocr
LearnedPositionalEncoding
false
10,411
[ "Apache-2.0" ]
0
686dd6c9d897e0401c20e7dcadb07a07c1dbc284
https://github.com/longnsl1998/vietocr/tree/686dd6c9d897e0401c20e7dcadb07a07c1dbc284
CrossNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/52/c52kaji6eyiltsqxp7w6aivnhxv5zwp3fyswckudklooo5a6gore.py # Topologically Sorted Source Nodes: [xl_w], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # xl_w => clone, view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [64, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = 
yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + ((16*x1) + (64*(y0 // 16)) + (y0 % 16)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c5/cc5niyk56fa3m2oc45eqgnf3eeposiu56eefgo6q2ozqlq3nwxnc.py # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] # Source node to ATen node mapping: # dot_ => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/p5/cp5pohlvud22ry7jg7rk2cbrxg2zgd5m6ftrmu3zisebczw32ykt.py # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] # Source node to ATen node mapping: # dot_ => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/rj/crjfvijs5tvs7lmrx3tnlbqpiqf6fftdmggpgew3khzaaiwhtbmp.py # Topologically Sorted Source Nodes: [xl_w_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # xl_w_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = (xindex // 256) x5 = (xindex // 16) % 16 x2 = 
(xindex // 16) % 4 x6 = (xindex // 4) % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x5 + (16*x0) + (64*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x6 + (16*x0) + (64*x4)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x7), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ho/cho3ytdwjrelha3yb4kblaihfa7pdkxvm7w76zy4lipwg7jvufdx.py # Topologically Sorted Source Nodes: [add, x_l, add_2, x_l_1, x_l_2], Original ATen: [aten.add, aten.squeeze] # Source node to ATen node mapping: # add => add # add_2 => add_2 # x_l => add_1 # x_l_1 => add_3 # x_l_2 => squeeze # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %select_1), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %select_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_1), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%add_3, 2), kwargs = {}) triton_poi_fused_add_squeeze_4 = async_compile.triton('triton_poi_fused_add_squeeze_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_squeeze_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_squeeze_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x1 = (xindex // 4) % 4 x3 = (xindex // 64) x6 = xindex % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x6 + (16*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 
= tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp2 + tmp7 tl.store(out_ptr0 + (x7), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (2, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [xl_w], Original ATen: [aten.clone, aten._unsafe_view] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [xl_w], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_1, buf2, 1024, grid=grid(1024), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf1, buf3, 256, grid=grid(256), stream=stream0) del buf1 buf4 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [xl_w_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, primals_3, primals_1, buf5, 1024, grid=grid(1024), stream=stream0) buf6 = reinterpret_tensor(buf3, (256, 1), (1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [xl_w_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf5, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 1), 4), out=buf6) buf7 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot__1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (64, 4, 1), (4, 1, 1), 0), out=buf7) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, x_l, add_2, x_l_1, x_l_2], Original ATen: [aten.add, aten.squeeze] triton_poi_fused_add_squeeze_4.run(buf7, primals_3, buf4, primals_1, buf8, 1024, grid=grid(1024), stream=stream0) del buf4 del buf7 del primals_1 del primals_3 return (buf8, reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 256), (1, 4), 0), reinterpret_tensor(primals_2, (1, 4), (1, 1), 4), reinterpret_tensor(buf0, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from sklearn.metrics import * import torch.onnx import torch as torch class CrossNet(nn.Module): """The Cross Network part of Deep&Cross Network model, which learns both low and high degree cross features. Input shape - 2D tensor with shape: ``(batch_size, units)``. Output shape - 2D tensor with shape: ``(batch_size, units)``. Arguments - **in_features** : Positive integer, dimensionality of input features. - **input_feature_num**: Positive integer, shape(Input tensor)[-1] - **layer_num**: Positive integer, the cross layer number - **parameterization**: string, ``"vector"`` or ``"matrix"`` , way to parameterize the cross network. - **l2_reg**: float between 0 and 1. L2 regularizer strength applied to the kernel weights matrix - **seed**: A Python integer to use as random seed. References - [Wang R, Fu B, Fu G, et al. Deep & cross network for ad click predictions[C]//Proceedings of the ADKDD'17. ACM, 2017: 12.](https://arxiv.org/abs/1708.05123) - [Wang R, Shivanna R, Cheng D Z, et al. DCN-M: Improved Deep & Cross Network for Feature Cross Learning in Web-scale Learning to Rank Systems[J]. 2020.](https://arxiv.org/abs/2008.13535) """ def __init__(self, in_features, layer_num=2, parameterization='vector', seed=1024, device='cpu'): super(CrossNet, self).__init__() self.layer_num = layer_num self.parameterization = parameterization if self.parameterization == 'vector': self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1)) elif self.parameterization == 'matrix': self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, in_features)) else: raise ValueError("parameterization should be 'vector' or 'matrix'") self.bias = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1)) for i in range(self.kernels.shape[0]): nn.init.xavier_normal_(self.kernels[i]) for i in range(self.bias.shape[0]): nn.init.zeros_(self.bias[i]) self.to(device) def forward(self, inputs): x_0 = inputs.unsqueeze(2) x_l = x_0 for i in range(self.layer_num): if self.parameterization == 'vector': xl_w = torch.tensordot(x_l, self.kernels[i], dims=([1], [0])) dot_ = torch.matmul(x_0, xl_w) x_l = dot_ + self.bias[i] + x_l elif self.parameterization == 'matrix': xl_w = torch.matmul(self.kernels[i], x_l) dot_ = xl_w + self.bias[i] x_l = x_0 * dot_ + x_l else: raise ValueError( "parameterization should be 'vector' or 'matrix'") x_l = torch.squeeze(x_l, dim=2) return x_l def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
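For the 'vector' parameterization, the tensordot/matmul pair in forward implements the DCN recurrence x_{l+1} = x_0 * (x_l^T w_l) + b_l + x_l, where x_l^T w_l is a per-sample scalar. A small illustrative sketch (variable names are ours, not the repo's) spelling out one layer for a 2-D (batch, features) input:

import torch

# Illustrative only: one 'vector'-parameterized cross layer, equivalent to
# the tensordot/matmul pair in CrossNet.forward.
B, F = 2, 4
x0 = torch.rand(B, F, 1)                  # inputs.unsqueeze(2)
w = torch.rand(F, 1)                      # self.kernels[i]
b = torch.zeros(F, 1)                     # self.bias[i]
xl = x0
xl_w = (xl * w).sum(dim=1, keepdim=True)  # x_l^T w_l, shape (B, 1, 1)
xl = x0 * xl_w + b + xl                   # x_{l+1} = x_0 (x_l^T w_l) + b_l + x_l
print(xl.squeeze(2).shape)                # torch.Size([2, 4])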
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from sklearn.metrics import * import torch.onnx import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 256 x5 = xindex // 16 % 16 x2 = xindex // 16 % 4 x6 = xindex // 4 % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x5 + 16 * x0 + 64 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x6 + 16 * x0 + 64 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x7, tmp4, xmask) @triton.jit def triton_poi_fused_add_squeeze_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x6 = xindex % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp2 + tmp7 tl.store(out_ptr0 + x7, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (2, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1, 1 ), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(1024)](primals_1, buf2, 1024, XBLOCK= 256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_2[grid(256)](buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf4 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(1024)](buf4, primals_3, primals_1, buf5, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (256, 1), (1, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf5, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 1), 4), out=buf6) buf7 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (64, 4, 1), (4, 1, 1), 0), out=buf7) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_add_squeeze_4[grid(1024)](buf7, primals_3, buf4, primals_1, buf8, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del buf7 del primals_1 del primals_3 return buf8, reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf5, (4, 256), (1, 4), 0), reinterpret_tensor( primals_2, (1, 4), (1, 1), 4), reinterpret_tensor(buf0, (4, 64), (1, 4), 0) class CrossNetNew(nn.Module): """The Cross Network part of Deep&Cross Network model, which learns both low and high degree cross features. Input shape - 2D tensor with shape: ``(batch_size, units)``. Output shape - 2D tensor with shape: ``(batch_size, units)``. Arguments - **in_features** : Positive integer, dimensionality of input features. - **input_feature_num**: Positive integer, shape(Input tensor)[-1] - **layer_num**: Positive integer, the cross layer number - **parameterization**: string, ``"vector"`` or ``"matrix"`` , way to parameterize the cross network. - **l2_reg**: float between 0 and 1. L2 regularizer strength applied to the kernel weights matrix - **seed**: A Python integer to use as random seed. References - [Wang R, Fu B, Fu G, et al. Deep & cross network for ad click predictions[C]//Proceedings of the ADKDD'17. ACM, 2017: 12.](https://arxiv.org/abs/1708.05123) - [Wang R, Shivanna R, Cheng D Z, et al. DCN-M: Improved Deep & Cross Network for Feature Cross Learning in Web-scale Learning to Rank Systems[J]. 
2020.](https://arxiv.org/abs/2008.13535) """ def __init__(self, in_features, layer_num=2, parameterization='vector', seed=1024, device='cpu'): super(CrossNetNew, self).__init__() self.layer_num = layer_num self.parameterization = parameterization if self.parameterization == 'vector': self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1)) elif self.parameterization == 'matrix': self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, in_features)) else: raise ValueError("parameterization should be 'vector' or 'matrix'") self.bias = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1)) for i in range(self.kernels.shape[0]): nn.init.xavier_normal_(self.kernels[i]) for i in range(self.bias.shape[0]): nn.init.zeros_(self.bias[i]) self.to(device) def forward(self, input_0): primals_2 = self.kernels primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
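A hypothetical smoke test for the compiled wrapper, assuming a CUDA device: the call() guards pin the kernels/bias to (2, 4, 1) (layer_num=2, in_features=4) and the input to a contiguous (4, 4, 4, 4) tensor. Note the compiled output keeps the 5-D shape of buf8, since torch.squeeze(x_l, dim=2) is a no-op when that dimension has size 4.

import torch

# Sketch only: shapes follow the assert_size_stride guards in call().
net = CrossNetNew(in_features=4, layer_num=2).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = net(x)
print(out.shape)  # torch.Size([4, 4, 4, 4, 4]), the shape of buf8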
dulvqingyunLT/DeepCTR-Torch
CrossNet
false
10,412
[ "Apache-2.0" ]
0
f40cf08f3469aa471f9ca69e44c5de51180341cc
https://github.com/dulvqingyunLT/DeepCTR-Torch/tree/f40cf08f3469aa471f9ca69e44c5de51180341cc
ExtResNetBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cn/ccnjw5i2bmojhqd43gtvy2mo5rvcvx4kyqocflzm32zw36biefd7.py # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu] # Source node to ATen node mapping: # input_2 => add, add_1, mul_1, rsqrt, var_mean # input_3 => expm1, gt, mul_2, mul_4, where # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_6), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_2, %mul_4), kwargs = {}) triton_per_fused_elu_native_group_norm_0 = async_compile.triton('triton_per_fused_elu_native_group_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, 
cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_elu_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 1.0 tmp31 = tmp27 * tmp30 tmp32 = libdevice.expm1(tmp31) tmp33 = tmp32 * tmp30 tmp34 = tl.where(tmp29, tmp31, tmp33) tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp34, xmask) tl.store(out_ptr2 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ql/cql5y77ikb3qyybctpvmmqkg56j3yahci6d3fpavhe5gjjt6labu.py # Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu] # Source node to ATen node mapping: # input_8 => add_4, add_5, mul_11, rsqrt_2, var_mean_2 # out => add_6 # out_1 => expm1_2, gt_2, mul_12, mul_14, where_2 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_22), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %unsqueeze_19), kwargs = {}) # %add_6 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %where), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {}) # %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_12, %mul_14), kwargs = {}) triton_per_fused_add_elu_native_group_norm_1 = async_compile.triton('triton_per_fused_add_elu_native_group_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_elu_native_group_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 
tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = 0.0 tmp31 = tmp29 > tmp30 tmp32 = 1.0 tmp33 = tmp29 * tmp32 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp32 tmp36 = tl.where(tmp31, tmp33, tmp35) tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp36, xmask) tl.store(out_ptr2 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf4; del buf4 # reuse buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu] stream0 = get_raw_stream(0) triton_per_fused_elu_native_group_norm_0.run(buf6, buf0, primals_3, primals_4, buf1, buf5, 16, 16, grid=grid(16), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf11; del buf11 # reuse buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.native_group_norm, aten.elu] triton_per_fused_elu_native_group_norm_0.run(buf13, buf7, primals_6, primals_7, buf8, buf12, 16, 16, grid=grid(16), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = buf19; del buf19 # 
reuse buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu] triton_per_fused_add_elu_native_group_norm_1.run(buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 16, 16, grid=grid(16), stream=stream0) del primals_10 return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor(buf18, (4, 4), (4, 1), 0), buf20, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): """ Create a list of modules which together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels order (string): order of things, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm padding (int): add zero-padding to the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') assert not is_before_conv, 'GroupNorm MUST go after the Conv3d' if out_channels < num_groups: num_groups = out_channels modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" ) return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'crg', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlock(nn.Module): """ Basic UNet block consisting of a SingleConv followed by the residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlock, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) residual = out out = self.conv2(out) out = self.conv3(out) out += residual out = self.non_linearity(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 1.0 tmp31 = tmp27 * tmp30 tmp32 = libdevice.expm1(tmp31) tmp33 = tmp32 * tmp30 tmp34 = tl.where(tmp29, tmp31, tmp33) tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp34, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = 0.0 tmp31 = tmp29 > tmp30 tmp32 = 1.0 tmp33 = tmp29 * tmp32 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp32 tmp36 = tl.where(tmp31, tmp33, tmp35) tl.store(in_out_ptr0 + 
(r1 + 16 * x0), tmp36, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf4 del buf4 buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_per_fused_elu_native_group_norm_0[grid(16)](buf6, buf0, primals_3, primals_4, buf1, buf5, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf11 del buf11 buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_per_fused_elu_native_group_norm_0[grid(16)](buf13, buf7, primals_6, primals_7, buf8, buf12, 16, 16, XBLOCK=8, num_warps= 2, num_stages=1) del primals_7 buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = buf19 del buf19 buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_per_fused_add_elu_native_group_norm_1[grid(16)](buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_10 return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor( buf12, (4, 4), (4, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor(buf18, (4, 4), (4, 1), 0), buf20 ) def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, 
bias=bias) def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): """ Create a list of modules which together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels order (string): order of things, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm padding (int): add zero-padding to the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') assert not is_before_conv, 'GroupNorm MUST go after the Conv3d' if out_channels < num_groups: num_groups = out_channels modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" ) return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'crg', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlockNew(nn.Module): """ Basic UNet block consisting of a SingleConv followed by the residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlockNew, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.conv.weight primals_3 = self.conv1.groupnorm.weight primals_4 = self.conv1.groupnorm.bias primals_5 = self.conv2.conv.weight primals_6 = self.conv2.groupnorm.weight primals_7 = self.conv2.groupnorm.bias primals_8 = self.conv3.conv.weight primals_9 = self.conv3.groupnorm.weight primals_10 = self.conv3.groupnorm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
joowlim/pytorch-3dunet
ExtResNetBlock
false
10,413
[ "MIT" ]
0
d08049f60b619627521efd0fb171247e1536b262
https://github.com/joowlim/pytorch-3dunet/tree/d08049f60b619627521efd0fb171247e1536b262
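Note on the entry above: create_conv is a small order-string DSL ('c' = Conv3d, 'g' = GroupNorm, 'r'/'l'/'e' = ReLU/LeakyReLU/ELU), and SingleConv just registers the resulting (name, module) pairs on an nn.Sequential. A minimal sketch of what the 'cge' ordering used by ExtResNetBlock produces, assuming the conv3d, create_conv, and SingleConv definitions from this entry are in scope:

import torch

# 'cge' -> Conv3d (bias disabled because a GroupNorm follows) + GroupNorm + ELU
layers = create_conv(in_channels=4, out_channels=8, kernel_size=3,
                     order='cge', num_groups=8, padding=1)
print([name for name, _ in layers])  # ['conv', 'groupnorm', 'ELU']

# SingleConv registers the same modules on an nn.Sequential
block = SingleConv(4, 8, kernel_size=3, order='cge', num_groups=8)
print(block(torch.randn(1, 4, 8, 8, 8)).shape)  # torch.Size([1, 8, 8, 8, 8])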
QNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/2i/c2ibuo46ejiy7dbwrzgygpphwuemyxqooxyjcai24lecredzgyhh.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = 
xindex % 296 x2 = xindex % 1184 x3 = (xindex // 1184) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1280*x3)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (296, 4), (4, 1)) assert_size_stride(primals_2, (296, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (296, 296), (296, 1)) assert_size_stride(primals_5, (296, ), (1, )) assert_size_stride(primals_6, (4, 296), (296, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 296), (296, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 296), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 296), (4736, 1184, 296, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 296), (5120, 1280, 296, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 18944, grid=grid(18944), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 296), (296, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 296), (296, 1), 0), reinterpret_tensor(primals_4, (296, 296), (1, 296), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 296), (4736, 1184, 296, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 296), (5120, 1280, 296, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 18944, grid=grid(18944), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 296), (296, 1), 0), reinterpret_tensor(primals_6, (296, 4), (1, 296), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 296), (296, 1), 0), reinterpret_tensor(buf3, (64, 296), (296, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((296, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((296, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((296, 296), (296, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((296, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 296), (296, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class QNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=296, fc2_units=296): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(QNetwork, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) def forward(self, state): """Build a network that maps state -> action values.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 296 x2 = xindex % 1184 x3 = xindex // 1184 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1280 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (296, 4), (4, 1)) assert_size_stride(primals_2, (296,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (296, 296), (296, 1)) assert_size_stride(primals_5, (296,), (1,)) assert_size_stride(primals_6, (4, 296), (296, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 296), (296, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 296), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 296), (4736, 1184, 296, 1), 0 ) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 296), (5120, 1280, 296, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(18944)](buf1, primals_2, buf6, 18944, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 296), (296, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 296), (296, 1), 0), reinterpret_tensor(primals_4, (296, 296), (1, 296), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 296), (4736, 1184, 296, 1), 0 ) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 296), (5120, 1280, 296, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(18944)](buf3, primals_5, buf5, 18944, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 296), (296, 1), 0), reinterpret_tensor(primals_6, (296, 4), (1, 296), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 296), (296, 1), 0 ), reinterpret_tensor(buf3, (64, 296), (296, 1), 0 ), primals_6, buf5, primals_4, buf6 class QNetworkNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=296, fc2_units=296): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(QNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
luiz-rocha94/navigation
QNetwork
false
10,414
[ "MIT" ]
0
fd5e00d8b9051e82dfe15793e53f8d1f86e8ecbe
https://github.com/luiz-rocha94/navigation/tree/fd5e00d8b9051e82dfe15793e53f8d1f86e8ecbe
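The QNetworkNew wrapper above feeds the fc1/fc2/fc3 parameters straight into the compiled call(), replacing the eager relu(fc1) -> relu(fc2) -> fc3 chain. A minimal equivalence check, assuming a CUDA device and that the QNetwork and QNetworkNew definitions from this entry are in scope:

import torch

eager = QNetwork(state_size=4, action_size=4, seed=4).cuda()
compiled = QNetworkNew(state_size=4, action_size=4, seed=4).cuda()
compiled.load_state_dict(eager.state_dict())  # force identical parameters

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x), atol=1e-5))  # expected: True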
Coskx
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7w/c7wbptj2xs4dg6al5exxesdedwezdo6ffqlgla7qkjcyjfntox4v.py # Topologically Sorted Source Nodes: [mul, cos], Original ATen: [aten.mul, aten.cos] # Source node to ATen node mapping: # cos => cos # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 50), kwargs = {}) # %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%mul,), kwargs = {}) triton_poi_fused_cos_mul_0 = async_compile.triton('triton_poi_fused_cos_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cos_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 50.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.cos(tmp2) tl.store(out_ptr0 
+ (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, cos], Original ATen: [aten.mul, aten.cos] stream0 = get_raw_stream(0) triton_poi_fused_cos_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Coskx(nn.Module): def __init__(self, k=50): super(Coskx, self).__init__() self.k = k def forward(self, input): return torch.cos(input * self.k) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 50.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.cos(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cos_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class CoskxNew(nn.Module): def __init__(self, k=50): super(CoskxNew, self).__init__() self.k = k def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jiaj15/SAIL
Coskx
false
10,415
[ "MIT" ]
0
734be06a2b0ae70801f59c191b86332592da97cf
https://github.com/jiaj15/SAIL/tree/734be06a2b0ae70801f59c191b86332592da97cf
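The Coskx entry compiles to a single fused mul+cos kernel; note that the scale k is baked into the kernel as the constant 50.0 (tmp1), so CoskxNew only reproduces the default k=50. A quick numerical check, assuming a CUDA device and that the Coskx and CoskxNew definitions above are in scope:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = Coskx(k=50)(x)     # eager: torch.cos(x * 50)
out = CoskxNew(k=50)(x)  # fused Triton kernel, k hard-coded to 50.0
print(torch.allclose(ref, out, atol=1e-6))  # expected: True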
GroupNorm32
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/zy/czyl6lkgkemly2rorp7dytnq5bsikpucv7yhz2var2hddv5xnkj4.py # Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid] # Source node to ATen node mapping: # mul => mul_2 # sigmoid => sigmoid # y => add, add_1, mul_1, rsqrt, var_mean # y_1 => mul_3 # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4.0), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sigmoid), kwargs = {}) triton_per_fused_mul_native_group_norm_sigmoid_0 = async_compile.triton('triton_per_fused_mul_native_group_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 
1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_native_group_norm_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = tmp27 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(in_out_ptr1 + (r1 + (64*x0)), tmp31, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid] stream0 = get_raw_stream(0) triton_per_fused_mul_native_group_norm_sigmoid_0.run(buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, grid=grid(4), stream=stream0) return (buf5, primals_1, primals_2, primals_3, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class GroupNorm32(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * torch.sigmoid(y * float(self.swish)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4, 'swish': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = tmp27 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = buf4 del buf4 get_raw_stream(0) triton_per_fused_mul_native_group_norm_sigmoid_0[grid(4)](buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) return buf5, primals_1, primals_2, primals_3, buf0, buf3 class GroupNorm32New(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
litevxx/glid-3
GroupNorm32
false
10,416
[ "MIT" ]
0
d7bd53e671d642b0cbc8af81197170b585c7e624
https://github.com/litevxx/glid-3/tree/d7bd53e671d642b0cbc8af81197170b585c7e624
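GroupNorm32 generalizes SiLU: with swish == 1.0 it applies silu(y), and for any other truthy swish it applies y * sigmoid(swish * y). The fused kernel above bakes swish in as the constant 4.0 (tmp28), matching get_init_inputs(). A reference check, assuming a CUDA device and that the GroupNorm32 and GroupNorm32New definitions above are in scope:

import torch

eager = GroupNorm32(num_groups=1, num_channels=4, swish=4).cuda()
fused = GroupNorm32New(num_groups=1, num_channels=4, swish=4).cuda()
fused.load_state_dict(eager.state_dict())  # share the affine weight/bias

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-5))  # expected: True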
Qnet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (2, 128), (128, 1)) assert_size_stride(primals_7, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import random import torch import torch.nn as nn import torch.nn.functional as F class Qnet(nn.Module): def __init__(self): super(Qnet, self).__init__() self.fc1 = nn.Linear(4, 128) self.fc2 = nn.Linear(128, 128) self.fc3 = nn.Linear(128, 2) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_action(self, obs, epsilon): out = self.forward(obs) coin = random.random() if coin < epsilon: return random.randint(0, 1) else: return out.argmax().item() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import random import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (2, 128), (128, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf6, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3, primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), primals_6, buf5, primals_4, buf6 class QnetNew(nn.Module): def __init__(self): super(QnetNew, self).__init__() self.fc1 = nn.Linear(4, 128) self.fc2 = nn.Linear(128, 128) self.fc3 = nn.Linear(128, 2) def get_action(self, obs, epsilon): out = self.forward(obs) coin = random.random() if coin < epsilon: return random.randint(0, 1) else: return out.argmax().item() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = 
self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
linklab/link_rl_book_codes
Qnet
false
10,417
[ "MIT" ]
0
b272b46d5ecd2802f34648440ff53641c68cbbf0
https://github.com/linklab/link_rl_book_codes/tree/b272b46d5ecd2802f34648440ff53641c68cbbf0
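Qnet.get_action implements epsilon-greedy action selection: with probability epsilon it returns a uniform random action in {0, 1}, otherwise the argmax of the predicted Q-values. A usage sketch with the eager Qnet (the compiled QnetNew asserts a fixed (4, 4, 4, 4) CUDA input, so a single 4-dim observation should go through the eager module):

import torch

q = Qnet()
obs = torch.rand(4)  # e.g. a CartPole-style state vector
for epsilon in (1.0, 0.5, 0.0):
    action = q.get_action(obs, epsilon)  # random w.p. epsilon, else argmax Q
    print(epsilon, action)               # action is always 0 or 1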
ScaledDotAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py # Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weights_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py # Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weights_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_4, buf2, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4), (16, 4, 1)) buf6 = buf1; del buf1 # reuse # Topologically 
Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf6, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf7 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf7, primals_6, 64, grid=grid(64), stream=stream0) del primals_6 buf8 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4), 0), buf7, out=buf8) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf9, buf10, 64, grid=grid(64), stream=stream0) buf11 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf11, primals_9, 64, grid=grid(64), stream=stream0) del primals_9 buf12 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf12, buf13, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf14, (4, 4, 4), (16, 4, 1)) del buf13 buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf15, primals_11, 64, grid=grid(64), stream=stream0) del primals_11 return (reinterpret_tensor(buf15, (4, 4, 4), (16, 1, 4), 0), buf10, primals_2, primals_5, primals_8, primals_10, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_7, (4, 4, 4), (16, 1, 4), 0), buf10, reinterpret_tensor(buf12, (4, 4, 4), (16, 1, 4), 0), buf11, buf6, reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) 
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
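A brief note on exercising the AOT wrapper above: running the file directly enters `compiled_module_main`, and the benchmark helper can also be invoked programmatically. A minimal sketch (a CUDA device is assumed, since the harness builds `cuda:0` tensors):

# Hedged sketch: drive the generated benchmark harness by hand.
# The harness fabricates its own random strided inputs via rand_strided,
# so no arguments beyond the timing knobs are needed.
benchmark_compiled_module(times=10, repeat=10)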
import torch import torch.nn as nn from torch.nn import LayerNorm def scaled_dot_attention(q, k, v, mask=None, noise=0, dropout=lambda x: x): """ :param q: queries, (batch, time1, channels1) :param k: keys, (batch, time2, channels1) :param v: values, (batch, time2, channels2) :param mask: boolean mask, (batch, time1, time2) :param dropout: a dropout function - this allows keeping dropout as a module -> better control when training/eval :return: (batch, time1, channels2), (batch, time1, time2) """ weights = torch.matmul(q, k.transpose(2, 1)) if mask is not None: weights = weights.masked_fill(~mask, float('-inf')) if noise: weights += noise * torch.randn(weights.shape) weights = torch.softmax(weights, dim=-1) weights = dropout(weights) result = torch.matmul(weights, v) return result, weights def mask(x, lengths, dim=-1): assert dim != 0, 'Masking not available for batch dimension' assert len(lengths) == x.shape[0 ], 'Lengths must contain as many elements as there are items in the batch' lengths = torch.as_tensor(lengths) to_expand = [1] * (x.ndim - 1) + [-1] mask = torch.arange(x.shape[dim]).expand(to_expand).transpose(dim, -1 ).expand(x.shape) mask = mask < lengths.expand(to_expand).transpose(0, -1) return mask class Conv1d(nn.Conv1d): """A wrapper around nn.Conv1d, that works on (batch, time, channels)""" def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, groups=1, bias=True, padding=0): super(Conv1d, self).__init__(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, dilation= dilation, groups=groups, bias=bias, padding=padding) def forward(self, x): return super().forward(x.transpose(2, 1)).transpose(2, 1) class ScaledDotAttention(nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, noise=0, normalize=False, dropout=False): super(ScaledDotAttention, self).__init__() self.noise = noise self.dropout = torch.nn.Dropout(p=dropout) self.normalize = normalize self.fc_query = Conv1d(in_channels, hidden_channels) self.fc_keys = Conv1d(in_channels, hidden_channels) if normalize: self.qnorm = LayerNorm(in_channels) self.knorm = LayerNorm(in_channels) self.fc_keys.weight = torch.nn.Parameter(self.fc_query.weight.clone()) self.fc_keys.bias = torch.nn.Parameter(self.fc_query.bias.clone()) self.fc_values = Conv1d(in_channels, hidden_channels) self.fc_out = Conv1d(hidden_channels, out_channels) def forward(self, q, k, v, mask=None): """ :param q: queries, (batch, time1, channels1) :param k: keys, (batch, time2, channels1) :param v: values, (batch, time2, channels2) :param mask: boolean mask, (batch, time1, time2) :return: (batch, time1, channels2), (batch, time1, time2) """ noise = self.noise if self.training else 0 if self.normalize: q = self.qnorm(q) k = self.knorm(k) alignment, weights = scaled_dot_attention(self.fc_query(q), self. fc_keys(k), self.fc_values(v), mask, noise=noise, dropout=self. dropout) alignment = self.fc_out(alignment) return alignment, weights def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'in_channels': 4, 'hidden_channels': 4, 'out_channels': 4}]
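For reference, a minimal eager-mode usage sketch of the module above, assembled from the record's own `get_init_inputs`/`get_inputs` helpers (CPU is fine here; no compiled kernels are involved):

import torch

# Instantiate with the record's init spec: ([], {'in_channels': 4, ...}).
init_args, init_kwargs = get_init_inputs()
attn = ScaledDotAttention(*init_args, **init_kwargs)
q, k, v = get_inputs()
alignment, weights = attn(q, k, v)
print(alignment.shape, weights.shape)  # both torch.Size([4, 4, 4])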
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import LayerNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 
4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf0 del buf0 triton_poi_fused_convolution_0[grid(16, 4)](primals_4, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf2 del buf2 triton_poi_fused_convolution_0[grid(16, 4)](primals_7, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4), (16, 4, 1)) buf6 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf6, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf7 = buf3 del buf3 triton_poi_fused_convolution_1[grid(64)](buf7, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf8 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4), 0), buf7, out=buf8) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf8 del buf8 triton_poi_fused__softmax_3[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = buf5 del buf5 triton_poi_fused_convolution_1[grid(64)](buf11, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf12 = buf9 del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_0[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf14, (4, 4, 4), (16, 4, 1)) del buf13 buf15 = buf14 del buf14 triton_poi_fused_convolution_1[grid(64)](buf15, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 return reinterpret_tensor(buf15, (4, 4, 4), (16, 1, 4), 0 ), buf10, primals_2, primals_5, primals_8, primals_10, reinterpret_tensor( primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_7, (4, 4, 4), (16, 1, 4), 0), buf10, reinterpret_tensor(buf12, (4, 4, 4), (16, 1, 4), 0), buf11, buf6, reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0) def scaled_dot_attention(q, k, v, mask=None, noise=0, dropout=lambda x: x): """ :param q: queries, (batch, time1, channels1) :param k: keys, (batch, time2, channels1) :param v: values, (batch, time2, channels2) :param mask: boolean mask, (batch, time1, time2) :param dropout: a dropout function - this allows keeping dropout as a module -> better control when 
training/eval :return: (batch, time1, channels2), (batch, time1, time2) """ weights = torch.matmul(q, k.transpose(2, 1)) if mask is not None: weights = weights.masked_fill(~mask, float('-inf')) if noise: weights += noise * torch.randn(weights.shape) weights = torch.softmax(weights, dim=-1) weights = dropout(weights) result = torch.matmul(weights, v) return result, weights def mask(x, lengths, dim=-1): assert dim != 0, 'Masking not available for batch dimension' assert len(lengths) == x.shape[0 ], 'Lengths must contain as many elements as there are items in the batch' lengths = torch.as_tensor(lengths) to_expand = [1] * (x.ndim - 1) + [-1] mask = torch.arange(x.shape[dim]).expand(to_expand).transpose(dim, -1 ).expand(x.shape) mask = mask < lengths.expand(to_expand).transpose(0, -1) return mask class Conv1d(nn.Conv1d): """A wrapper around nn.Conv1d, that works on (batch, time, channels)""" def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, groups=1, bias=True, padding=0): super(Conv1d, self).__init__(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, dilation= dilation, groups=groups, bias=bias, padding=padding) def forward(self, x): return super().forward(x.transpose(2, 1)).transpose(2, 1) class ScaledDotAttentionNew(nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, noise=0, normalize=False, dropout=False): super(ScaledDotAttentionNew, self).__init__() self.noise = noise self.dropout = torch.nn.Dropout(p=dropout) self.normalize = normalize self.fc_query = Conv1d(in_channels, hidden_channels) self.fc_keys = Conv1d(in_channels, hidden_channels) if normalize: self.qnorm = LayerNorm(in_channels) self.knorm = LayerNorm(in_channels) self.fc_keys.weight = torch.nn.Parameter(self.fc_query.weight.clone()) self.fc_keys.bias = torch.nn.Parameter(self.fc_query.bias.clone()) self.fc_values = Conv1d(in_channels, hidden_channels) self.fc_out = Conv1d(hidden_channels, out_channels) def forward(self, input_0, input_1, input_2): primals_2 = self.fc_query.weight primals_3 = self.fc_query.bias primals_5 = self.fc_keys.weight primals_6 = self.fc_keys.bias primals_8 = self.fc_values.weight primals_9 = self.fc_values.bias primals_10 = self.fc_out.weight primals_11 = self.fc_out.bias primals_1 = input_0 primals_4 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
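The `forward` above makes the parameter-to-primals mapping explicit: the four Conv1d weight/bias pairs become `primals_2/3`, `primals_5/6`, `primals_8/9`, and `primals_10/11`, while the three inputs fill `primals_1/4/7`. A hedged smoke test (a CUDA device is assumed, since `call` allocates through `empty_strided_cuda`):

# Sketch only: checks output shapes against the eager module, not a benchmark.
attn = ScaledDotAttentionNew(in_channels=4, hidden_channels=4, out_channels=4).cuda()
q, k, v = (t.cuda() for t in get_inputs())
alignment, weights = attn(q, k, v)  # (4, 4, 4) each, as in the eager path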
iclementine/speedyspeech
ScaledDotAttention
false
10,418
[ "BSD-3-Clause" ]
0
db527587a3699b71082d61c9e9fad7ed795d1980
https://github.com/iclementine/speedyspeech/tree/db527587a3699b71082d61c9e9fad7ed795d1980
Decoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (4, 128), (128, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
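One detail worth flagging in the wrapper above: `call` returns more than the model output. The first element is the user-visible result; the remaining entries (for example the boolean relu masks `buf5`/`buf6`) are residuals Inductor saves for `threshold_backward` in the backward graph. A sketch with hypothetical argument names:

# w1, b1, x, w2, b2, w3, b3 are illustrative names, not from the dump;
# call() clears the list it is given, so pass a fresh list each time.
out, *saved_for_backward = call([w1, b1, x, w2, b2, w3, b3])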
import torch import torch.nn.functional as F from torch import nn def weights_init_(m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight, gain=1) torch.nn.init.constant_(m.bias, 0) class Decoder(torch.nn.Module): def __init__(self, input_dim, out_dim, hidden_size=128): super(Decoder, self).__init__() self.linear1 = torch.nn.Linear(input_dim, hidden_size) self.linear2 = torch.nn.Linear(hidden_size, hidden_size) self.linear3 = torch.nn.Linear(hidden_size, out_dim) self.apply(weights_init_) def forward(self, x): x = F.relu(self.linear1(x)) x = F.relu(self.linear2(x)) x = self.linear3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'out_dim': 4}]
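A minimal eager usage sketch for the three-layer MLP above, using the record's helpers; note that the Linear layers act on the trailing dimension of the 4-D input:

decoder = Decoder(**get_init_inputs()[1])  # {'input_dim': 4, 'out_dim': 4}
x, = get_inputs()
y = decoder(x)
print(y.shape)  # torch.Size([4, 4, 4, 4])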
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (4, 128), (128, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf6, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3, primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), primals_6, buf5, primals_4, buf6 def weights_init_(m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight, gain=1) torch.nn.init.constant_(m.bias, 0) class DecoderNew(torch.nn.Module): def __init__(self, input_dim, out_dim, hidden_size=128): super(DecoderNew, self).__init__() self.linear1 = torch.nn.Linear(input_dim, hidden_size) self.linear2 = torch.nn.Linear(hidden_size, hidden_size) self.linear3 = torch.nn.Linear(hidden_size, out_dim) self.apply(weights_init_) def forward(self, 
input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
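Because `DecoderNew` keeps the same submodule names as the eager `Decoder`, the two are state-dict compatible, which makes a quick parity check straightforward. A hedged sketch (CUDA assumed):

eager = Decoder(input_dim=4, out_dim=4).cuda()
compiled = DecoderNew(input_dim=4, out_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # align the random initializations
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))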
jiaj15/SAIL
Decoder
false
10,419
[ "MIT" ]
0
734be06a2b0ae70801f59c191b86332592da97cf
https://github.com/jiaj15/SAIL/tree/734be06a2b0ae70801f59c191b86332592da97cf
PolicyNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 16384, grid=grid(16384), 
stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class PolicyNetwork(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size=256): super(PolicyNetwork, self).__init__() self.num_actions = num_actions self.linear1 = nn.Linear(num_inputs, hidden_size) self.linear2 = nn.Linear(hidden_size, num_actions) def forward(self, state): x = F.relu(self.linear1(state)) x = F.softmax(self.linear2(x), dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_actions': 4}]
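Worth noting about the module above: `dim=1` normalizes over the second axis of whatever shape arrives, so on the (4, 4, 4, 4) test input the probabilities sum to one along axis 1 rather than along the trailing action axis. A small eager check:

policy = PolicyNetwork(num_inputs=4, num_actions=4)
state, = get_inputs()
probs = policy(state)
print(probs.sum(dim=1)[0, 0])  # approximately tensor([1., 1., 1., 1.])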
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, 
primals_2, buf5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), buf4, primals_4, buf5 class PolicyNetworkNew(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size=256): super(PolicyNetworkNew, self).__init__() self.num_actions = num_actions self.linear1 = nn.Linear(num_inputs, hidden_size) self.linear2 = nn.Linear(hidden_size, num_actions) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
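The two softmax kernels above split the numerically stable formulation into two passes: `triton_poi_fused__softmax_1` computes `exp(x - max(x))` along the reduced axis, and `triton_poi_fused__softmax_2` divides by the per-slice sum. A plain-PyTorch reference sketch of the same math:

def stable_softmax(x, dim):
    # pass 1: subtract the per-slice max before exponentiating (overflow guard)
    e = torch.exp(x - x.amax(dim=dim, keepdim=True))
    # pass 2: normalize by the per-slice sum
    return e / e.sum(dim=dim, keepdim=True)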
linklab/link_rl_book_codes
PolicyNetwork
false
10,420
[ "MIT" ]
0
b272b46d5ecd2802f34648440ff53641c68cbbf0
https://github.com/linklab/link_rl_book_codes/tree/b272b46d5ecd2802f34648440ff53641c68cbbf0
ActorCriticNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py # Topologically Sorted Source Nodes: [value], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # value => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, 
tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [policy_dist_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy_dist_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [policy_dist_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy_dist_1 
=> div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 256), (256, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (256, 4), (4, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (4, 256), (256, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf10 = empty_strided_cuda((4, 
4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 16384, grid=grid(16384), stream=stream0) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 256), (1, 4), 0), out=buf4) del primals_6 buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf4 # reuse buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [policy_dist], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf9, 16384, grid=grid(16384), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [policy_dist_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [policy_dist_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf5, (64, 256), (256, 1), 0), buf8, primals_8, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class ActorCriticNetwork(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size=256): super(ActorCriticNetwork, self).__init__() self.num_actions = num_actions self.critic_linear1 = nn.Linear(num_inputs, hidden_size) self.critic_linear2 = nn.Linear(hidden_size, 1) self.actor_linear1 = nn.Linear(num_inputs, hidden_size) self.actor_linear2 = nn.Linear(hidden_size, num_actions) def forward(self, state_tensor): value = F.relu(self.critic_linear1(state_tensor)) value = self.critic_linear2(value) policy_dist = F.relu(self.actor_linear1(state_tensor)) policy_dist = F.softmax(self.actor_linear2(policy_dist), dim=1) return value, policy_dist def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 256), (256, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (256, 4), (4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (4, 256), (256, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = 
reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf10, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 256), (1, 4), 0), out=buf4) del primals_6 buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf4 buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf5, primals_7, buf9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), reinterpret_tensor(buf5, (64, 256), (256, 1), 0 ), buf8, primals_8, buf9, primals_4, buf10 class ActorCriticNetworkNew(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size=256): super(ActorCriticNetworkNew, self).__init__() self.num_actions = num_actions self.critic_linear1 = nn.Linear(num_inputs, hidden_size) self.critic_linear2 = nn.Linear(hidden_size, 1) self.actor_linear1 = nn.Linear(num_inputs, hidden_size) self.actor_linear2 = nn.Linear(hidden_size, num_actions) def forward(self, input_0): primals_1 = self.critic_linear1.weight primals_2 = self.critic_linear1.bias primals_4 = self.critic_linear2.weight primals_5 = self.critic_linear2.bias primals_6 = self.actor_linear1.weight primals_7 = self.actor_linear1.bias primals_8 = self.actor_linear2.weight primals_9 = self.actor_linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
linklab/link_rl_book_codes
ActorCriticNetwork
false
10421
[ "MIT" ]
0
b272b46d5ecd2802f34648440ff53641c68cbbf0
https://github.com/linklab/link_rl_book_codes/tree/b272b46d5ecd2802f34648440ff53641c68cbbf0
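A note on the fused softmax pair in the entry above: Inductor lowers F.softmax(..., dim=1) into two pointwise kernels, triton_poi_fused__softmax_1 (subtract the per-slice max, then exponentiate) and triton_poi_fused__softmax_2 (divide by the per-slice sum of exponentials). The sketch below reproduces that numerically stable two-stage decomposition in plain PyTorch; the helper name stable_softmax is hypothetical and not part of the generated code.

import torch

def stable_softmax(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # Stage 1 (mirrors triton_poi_fused__softmax_1): shift by the max
    # along `dim` so exp() cannot overflow.
    exp = (x - x.amax(dim=dim, keepdim=True)).exp()
    # Stage 2 (mirrors triton_poi_fused__softmax_2): normalize by the
    # sum of exponentials along the same dimension.
    return exp / exp.sum(dim=dim, keepdim=True)

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(stable_softmax(x, dim=1), torch.softmax(x, dim=1), atol=1e-6)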
SE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean] # Source node to ATen node mapping: # out => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6u/c6utrmzep5ziyaxjrlcwvhjg7wjjd7tfipeakfphf2az5qxhw3yf.py # Topologically Sorted Source Nodes: [conv2d, sigmoid, out_1], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # conv2d => convolution # out_1 => mul # sigmoid => sigmoid # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) triton_poi_fused_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], 
False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # out_2 => sigmoid_1 # out_3 => mul_1 # Graph fragment: # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid_1), kwargs = {}) triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, sigmoid, out_1], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] triton_poi_fused_convolution_mul_sigmoid_1.run(buf3, primals_3, buf4, 16, grid=grid(16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_3.run(primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0) return (buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def swish(x): return x * x.sigmoid() class SE(nn.Module): """Squeeze-and-Excitation block with Swish.""" def __init__(self, in_planes, se_planes): super(SE, self).__init__() self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True) self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True) def forward(self, x): out = F.adaptive_avg_pool2d(x, (1, 1)) out = swish(self.se1(out)) out = self.se2(out).sigmoid() out = x * out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'se_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_mul_sigmoid_1[grid(16)](buf3, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf5 = 
extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6 def swish(x): return x * x.sigmoid() class SENew(nn.Module): """Squeeze-and-Excitation block with Swish.""" def __init__(self, in_planes, se_planes): super(SENew, self).__init__() self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True) self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True) def forward(self, input_0): primals_2 = self.se1.weight primals_3 = self.se1.bias primals_4 = self.se2.weight primals_5 = self.se2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
liormagram/pytorch-cifar
SE
false
10422
[ "MIT" ]
0
2ed0fabe6cbd4a468c5c4d155fb76c5b9ad4a764
https://github.com/liormagram/pytorch-cifar/tree/2ed0fabe6cbd4a468c5c4d155fb76c5b9ad4a764
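A note on the SE entry above: the compiled graph performs the squeeze as a persistent mean reduction (triton_per_fused_mean_0, averaging the 16 spatial elements per channel), fuses the first 1x1 convolution's bias add with the swish activation x * sigmoid(x) (triton_poi_fused_convolution_mul_sigmoid_1), and finally gates the input with a sigmoid attention map broadcast from (B, C, 1, 1) to (B, C, H, W) (triton_poi_fused_mul_sigmoid_3). A minimal eager-mode sketch of the same pipeline follows; the helper name se_gate and the explicit weight arguments are assumptions for illustration only.

import torch
import torch.nn.functional as F

def se_gate(x, w1, b1, w2, b2):
    # Squeeze: global average pool to (B, C, 1, 1).
    s = x.mean(dim=(-1, -2), keepdim=True)
    # Excite: 1x1 conv followed by swish (bias add, sigmoid and multiply
    # are fused into a single kernel in the compiled version).
    s = F.conv2d(s, w1, b1)
    s = s * s.sigmoid()
    # Second 1x1 conv, then gate the full-resolution input with a
    # broadcast sigmoid.
    s = F.conv2d(s, w2, b2)
    return x * s.sigmoid()

x = torch.rand(4, 4, 4, 4)
w1, b1 = torch.randn(4, 4, 1, 1), torch.randn(4)
w2, b2 = torch.randn(4, 4, 1, 1), torch.randn(4)
assert se_gate(x, w1, b1, w2, b2).shape == x.shape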
MultiHeadQKVAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wb/cwbnzhah456ssqf5k4ob4llmezbi7o6givtx6ppibkklzj3kmilo.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + (16*y0)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = 
{}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wl/cwll6lomrruls6u4c3ua52p4g5jvoxwf6bvyj4dolvf23rk3zogz.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2) del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_5, buf3, 4, 16, grid=grid(4, 16), stream=stream0) del primals_5 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_7, buf4, 4, 16, grid=grid(4, 16), stream=stream0) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, 
(16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_9, buf8, 4, 16, grid=grid(4, 16), stream=stream0) del primals_9 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [o], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_k': 4, 'd_v': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2) del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](buf0, primals_5, buf3, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_5 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(4, 16)](buf1, primals_7, buf4, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(4, 16)](buf2, primals_9, buf8, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_9 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 
1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
        ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
        ), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)


def qkv_attention(queries, keys, values, presence=None):
    """
    Transformer-like self-attention.

    Args:
      queries: Tensor of shape [B, N, d_k].
      keys: Tensor of shape [B, M, d_k].
      values: Tensor of shape [B, M, d_v].
      presence: None or tensor of shape [B, M].

    Returns:
      Tensor of shape [B, N, d_v]
    """
    d_k = queries.shape[-1]
    routing = torch.matmul(queries, keys.transpose(1, 2))
    if presence is not None:
        routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32
    routing = F.softmax(routing / np.sqrt(d_k), -1)
    return torch.matmul(routing, values)


class MultiHeadQKVAttentionNew(nn.Module):
    """Multi-head version of Transformer-like attention."""

    def __init__(self, d_k, d_v, n_heads):
        super().__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.n_heads = n_heads
        d_k_p = int(math.ceil(d_k / n_heads)) * n_heads
        d_v_p = int(math.ceil(d_v / n_heads)) * n_heads
        self.q_projector = nn.Linear(d_k, d_k_p)
        self.k_projector = nn.Linear(d_k, d_k_p)
        self.v_projector = nn.Linear(d_v, d_v_p)
        self.o_projector = nn.Linear(d_v_p, d_v)

    def forward(self, input_0, input_1, input_2):
        primals_4 = self.q_projector.weight
        primals_5 = self.q_projector.bias
        primals_6 = self.k_projector.weight
        primals_7 = self.k_projector.bias
        primals_8 = self.v_projector.weight
        primals_9 = self.v_projector.bias
        primals_10 = self.o_projector.weight
        primals_11 = self.o_projector.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
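# A minimal usage sketch for the compiled module above, assuming a CUDA
# device (the `call` wrapper pins its buffers to cuda:0) and reusing the
# (4, 4, 4) input shapes asserted in `call`; n_heads=4 matches the
# per-head size of 1 baked into the bmm reshapes. Variable names here are
# illustrative, not part of the original entry.
import torch

mha = MultiHeadQKVAttentionNew(d_k=4, d_v=4, n_heads=4).cuda()
queries = torch.rand(4, 4, 4, device='cuda')  # [B, N, d_k]
keys = torch.rand(4, 4, 4, device='cuda')     # [B, M, d_k]
values = torch.rand(4, 4, 4, device='cuda')   # [B, M, d_v]
out = mha(queries, keys, values)
assert out.shape == (4, 4, 4)                 # [B, N, d_v]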
karayanni/torch-scae
MultiHeadQKVAttention
false
10,423
[ "Apache-2.0" ]
0
e044662d8942d8d1923d13d071f375144cf4a1e8
https://github.com/karayanni/torch-scae/tree/e044662d8942d8d1923d13d071f375144cf4a1e8
AFMLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/oj/coji63cmjptmfiahnhfxrcymtijnwomdesxsksu5cd5o6hnjtmkc.py # Topologically Sorted Source Nodes: [p, q, inner_product], Original ATen: [aten.cat, aten.mul] # Source node to ATen node mapping: # inner_product => mul # p => cat # q => cat_1 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select, %select, %select, %select_1, %select_1, %select_2], 1), kwargs = {}) # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_1, %select_2, %select_3, %select_2, %select_3, %select_3], 1), kwargs = {}) # %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, %cat_1), kwargs = {}) triton_poi_fused_cat_mul_0 = async_compile.triton('triton_poi_fused_cat_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_mul_0(in_out_ptr0, 
in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 24 x0 = xindex % 4 x2 = (xindex // 96) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (64 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (64 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 24, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tl.load(in_ptr0 + (128 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tmp35 = tl.load(in_ptr0 + (64 + x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp36 = tl.load(in_ptr0 + (128 + x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0) tmp37 = tl.load(in_ptr0 + (192 + x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0) tmp38 = tl.load(in_ptr0 + (128 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0) tmp39 = tl.load(in_ptr0 + (192 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0) tmp40 = tl.load(in_ptr0 + (192 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0) tmp41 = tl.where(tmp24, tmp39, tmp40) tmp42 = tl.where(tmp19, tmp38, tmp41) tmp43 = tl.where(tmp14, tmp37, tmp42) tmp44 = tl.where(tmp9, tmp36, tmp43) tmp45 = tl.where(tmp4, tmp35, tmp44) tmp46 = tmp34 * tmp45 tl.store(in_out_ptr0 + (x3), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/j2/cj26ownu73m72kwjlseu3qfwtrz4f3ru464aa4zuhodtujlnjupm.py # Topologically Sorted Source Nodes: [add, attention_temp], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add # attention_temp => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/p4/cp4mdcdve4y73ad5mzhckzksofhes3a2n2zye5hynnmbc62ct27d.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (24*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (24*x0)), tmp11, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ui/cui4pbynpryqmgmjhsdzeompa6sltxsmy5ggopxiaqdlvyafsjpl.py # Topologically Sorted Source Nodes: [mul_1, attention_output], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # attention_output => sum_2 # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %mul), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_per_fused_mul_sum_3 = async_compile.triton('triton_per_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 32], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r2 = rindex x1 = (xindex // 4) x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (24*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (96*x1)), rmask & xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(rmask & xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [p, q, inner_product], Original ATen: [aten.cat, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_cat_mul_0.run(buf2, primals_1, 384, grid=grid(384), stream=stream0) del primals_1 buf3 = empty_strided_cuda((96, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tensordot], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (96, 4), (4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = reinterpret_tensor(buf3, (4, 24, 4), (96, 4, 1), 0); del buf3 # reuse buf11 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, attention_temp], Original ATen: [aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_1.run(buf4, primals_3, buf11, 384, grid=grid(384), stream=stream0) del primals_3 buf5 = empty_strided_cuda((96, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [tensordot_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (96, 4), (4, 1), 0), primals_4, out=buf5) buf8 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_2.run(buf5, buf8, 4, 24, grid=grid(4), stream=stream0) del buf5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, attention_output], Original ATen: [aten.mul, aten.sum] triton_per_fused_mul_sum_3.run(buf8, buf2, buf9, 16, 24, grid=grid(16), stream=stream0) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [afm_out], Original ATen: [aten.mm] extern_kernels.mm(buf9, primals_5, out=buf10) return (buf10, buf8, buf2, buf8, reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0), reinterpret_tensor(buf4, (4, 96), (1, 4), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1), (1, 1), 
device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
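# The `triton_per_fused__softmax_2` kernel above implements the standard
# max-shifted softmax; the same identity in plain PyTorch, as a sketch:
# subtracting the row max leaves the result unchanged but keeps exp()
# from overflowing.
import torch

scores = torch.randn(4, 24, 1)
shifted = scores - scores.amax(dim=1, keepdim=True)
probs = shifted.exp() / shifted.exp().sum(dim=1, keepdim=True)
assert torch.allclose(probs, torch.softmax(scores, dim=1))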
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
import torch.onnx
import torch as torch


class AFMLayer(nn.Module):
    """Attentional Factorization Machine models pairwise (order-2) feature
    interactions without linear term and bias.
      Input shape
        - A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
      Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
      Arguments
        - **in_features** : Positive integer, dimensionality of input features.
        - **attention_factor** : Positive integer, dimensionality of the
          attention network output space.
        - **l2_reg_w** : float between 0 and 1. L2 regularizer strength
          applied to attention network.
        - **dropout_rate** : float in [0, 1). Fraction of the attention net
          output units to dropout.
        - **seed** : A Python integer to use as random seed.
      References
        - [Attentional Factorization Machines : Learning the Weight of Feature
          Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
    """

    def __init__(self, in_features, attention_factor=4, l2_reg_w=0,
            dropout_rate=0, seed=1024, device='cpu'):
        super(AFMLayer, self).__init__()
        self.attention_factor = attention_factor
        self.l2_reg_w = l2_reg_w
        self.dropout_rate = dropout_rate
        self.seed = seed
        embedding_size = in_features
        self.attention_W = nn.Parameter(torch.Tensor(embedding_size,
            self.attention_factor))
        self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
        self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1))
        self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
        for tensor in [self.attention_W, self.projection_h, self.projection_p]:
            nn.init.xavier_normal_(tensor)
        for tensor in [self.attention_b]:
            nn.init.zeros_(tensor)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, inputs):
        embeds_vec_list = inputs
        row = []
        col = []
        for r, c in itertools.combinations(embeds_vec_list, 2):
            row.append(r)
            col.append(c)
        p = torch.cat(row, dim=1)
        q = torch.cat(col, dim=1)
        inner_product = p * q
        bi_interaction = inner_product
        attention_temp = F.relu(torch.tensordot(bi_interaction,
            self.attention_W, dims=([-1], [0])) + self.attention_b)
        self.normalized_att_score = F.softmax(torch.tensordot(
            attention_temp, self.projection_h, dims=([-1], [0])), dim=1)
        attention_output = torch.sum(self.normalized_att_score *
            bi_interaction, dim=1)
        attention_output = self.dropout(attention_output)
        afm_out = torch.tensordot(attention_output, self.projection_p,
            dims=([-1], [0]))
        return afm_out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
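# A standalone sketch of the pairwise construction in AFMLayer.forward:
# four embedding tensors yield C(4, 2) = 6 pairs, which is why the fused
# cat/mul kernel above works over a concatenated dimension of 6 * 4 = 24
# interaction rows (xnumel = 4 * 24 * 4 = 384). Shapes follow get_inputs().
import itertools
import torch

embeds = list(torch.rand(4, 4, 4, 4))  # iterates into four [4, 4, 4] tensors
row, col = [], []
for r, c in itertools.combinations(embeds, 2):
    row.append(r)
    col.append(c)
p = torch.cat(row, dim=1)  # [4, 24, 4]
q = torch.cat(col, dim=1)  # [4, 24, 4]
inner_product = p * q      # element-wise order-2 interactions
assert inner_product.shape == (4, 24, 4)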
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from sklearn.metrics import * import torch.onnx import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24 x0 = xindex % 4 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tmp35 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp36 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp37 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp38 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp39 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp40 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp41 = tl.where(tmp24, tmp39, tmp40) tmp42 = tl.where(tmp19, tmp38, tmp41) tmp43 = tl.where(tmp14, tmp37, tmp42) tmp44 = tl.where(tmp9, tmp36, tmp43) tmp45 = tl.where(tmp4, tmp35, tmp44) tmp46 = tmp34 * tmp45 tl.store(in_out_ptr0 + x3, tmp46, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def 
triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 24 * x0), tmp11, rmask & xmask) @triton.jit def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x1 = xindex // 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 24 * x1), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 96 * x1), rmask & xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(rmask & xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_cat_mul_0[grid(384)](buf2, primals_1, 384, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((96, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (96, 4), (4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = reinterpret_tensor(buf3, (4, 24, 4), (96, 4, 1), 0) del buf3 buf11 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(384)](buf4, primals_3, buf11, 384, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((96, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (96, 4), (4, 1), 0), primals_4, out=buf5) buf8 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32) triton_per_fused__softmax_2[grid(4)](buf5, buf8, 4, 24, XBLOCK=1, num_warps=2, num_stages=1) del buf5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_mul_sum_3[grid(16)](buf8, buf2, buf9, 16, 24, XBLOCK=1, num_warps=2, num_stages=1) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_5, out=buf10) return buf10, buf8, buf2, buf8, reinterpret_tensor(buf9, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0 ), reinterpret_tensor(buf4, (4, 96), (1, 4), 0), reinterpret_tensor( primals_4, (1, 4), (1, 1), 0), buf11 class AFMLayerNew(nn.Module): """Attentonal Factorization Machine models pairwise (order-2) feature interactions without linear term and bias. 
      Input shape
        - A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
      Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
      Arguments
        - **in_features** : Positive integer, dimensionality of input features.
        - **attention_factor** : Positive integer, dimensionality of the
          attention network output space.
        - **l2_reg_w** : float between 0 and 1. L2 regularizer strength
          applied to attention network.
        - **dropout_rate** : float in [0, 1). Fraction of the attention net
          output units to dropout.
        - **seed** : A Python integer to use as random seed.
      References
        - [Attentional Factorization Machines : Learning the Weight of Feature
          Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
    """

    def __init__(self, in_features, attention_factor=4, l2_reg_w=0,
            dropout_rate=0, seed=1024, device='cpu'):
        super(AFMLayerNew, self).__init__()
        self.attention_factor = attention_factor
        self.l2_reg_w = l2_reg_w
        self.dropout_rate = dropout_rate
        self.seed = seed
        embedding_size = in_features
        self.attention_W = nn.Parameter(torch.Tensor(embedding_size,
            self.attention_factor))
        self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
        self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1))
        self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
        for tensor in [self.attention_W, self.projection_h, self.projection_p]:
            nn.init.xavier_normal_(tensor)
        for tensor in [self.attention_b]:
            nn.init.zeros_(tensor)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_0):
        primals_2 = self.attention_W
        primals_3 = self.attention_b
        primals_4 = self.projection_h
        primals_5 = self.projection_p
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
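# A parity-check sketch between the eager AFMLayer and the compiled
# AFMLayerNew, assuming a CUDA device (the `call` wrapper is CUDA-only).
# With the default dropout_rate=0 the two paths are expected to agree
# numerically; this is an illustration, not part of the original entry.
import torch

torch.manual_seed(0)
eager = AFMLayer(in_features=4).cuda()
compiled = AFMLayerNew(in_features=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameters

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
print(torch.allclose(ref, out, atol=1e-5))  # expected: True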
dulvqingyunLT/DeepCTR-Torch
AFMLayer
false
10,424
[ "Apache-2.0" ]
0
f40cf08f3469aa471f9ca69e44c5de51180341cc
https://github.com/dulvqingyunLT/DeepCTR-Torch/tree/f40cf08f3469aa471f9ca69e44c5de51180341cc
DRRN
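# The generated code below unrolls a DRRN-style network: one input conv,
# then recursive residual units that all reuse the same two 3x3
# convolutions (primals_3 / primals_4), and a final conv plus a global
# skip connection. A sketch of that structure in eager PyTorch, inferred
# from the pattern of convolutions and fused add+relu kernels in `call`
# (U=25 units matches the 50 weight-shared convolutions there); this is
# an illustration, not the repository's original source.
import torch
import torch.nn as nn
import torch.nn.functional as F


class DRRNSketch(nn.Module):
    def __init__(self, channels=128, units=25):
        super().__init__()
        self.input_conv = nn.Conv2d(1, channels, 3, padding=1, bias=False)
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.output_conv = nn.Conv2d(channels, 1, 3, padding=1, bias=False)
        self.units = units

    def forward(self, x):
        residual = F.relu(x)                      # matches buf0 = relu(primals_1)
        inputs = F.relu(self.input_conv(residual))
        out = inputs
        for _ in range(self.units):               # conv1/conv2 weights are shared
            out = F.relu(self.conv2(F.relu(self.conv1(out))) + inputs)
        return self.output_conv(out) + residual   # global skip, as in out_51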
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/77/c773z3ksgrnasomgfhaw574ix3f2o4yv2n3kziqh4wol2y4gnmrr.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/ej/cejmpqb52jrqlle33cesamyex5ioj4utjclnb7tlrznwezqa5gnx.py # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=27] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/q5/cq5vgxowbn4agnifxmmz3bdax3cn5mjnmy57emu5bjdwqabakzok.py # Topologically Sorted Source Nodes: [out_1, relu_3], Original ATen: [aten.add, aten.relu] # Source node to ATen node mapping: # out_1 => add # relu_3 => relu_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %relu_1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) triton_poi_fused_add_relu_2 = async_compile.triton('triton_poi_fused_add_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qf/cqfjpp4227icr7ijrkgfudkoifwsutjs7zhnrdiifjqxsnu7xuij.py # Topologically Sorted Source Nodes: [out_51], Original ATen: [aten.add] # Source node to ATen node mapping: # out_51 => add_25 # Graph fragment: # %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_51, %relu), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %relu), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) tl.store(out_ptr0 + (x0), 
tmp1, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (128, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_5, (1, 128, 3, 3), (1152, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [relu_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [out_1, relu_3], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf6, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [relu_4], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf8, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [out_3, relu_5], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf10, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf11, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [relu_6], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf12, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [out_5, relu_7], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf14, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [relu_8], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf16, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [out_7, relu_9], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf18, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf20 = buf19; del buf19 # reuse # Topologically Sorted Source Nodes: [relu_10], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf20, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf20, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf22 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [out_9, relu_11], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf22, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf22, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf24 = buf23; del buf23 # reuse # Topologically Sorted Source Nodes: [relu_12], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf24, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution] buf25 = extern_kernels.convolution(buf24, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf26 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [out_11, relu_13], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf26, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf28 = buf27; del buf27 # reuse # Topologically Sorted Source Nodes: [relu_14], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf28, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.convolution] buf29 = extern_kernels.convolution(buf28, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf30 = buf29; del buf29 # reuse # Topologically Sorted Source Nodes: [out_13, relu_15], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf30, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf30, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf32 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [relu_16], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf32, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] buf33 = extern_kernels.convolution(buf32, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf34 = buf33; del buf33 # reuse # Topologically Sorted Source Nodes: [out_15, relu_17], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf34, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf34, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf36 = buf35; del buf35 # reuse # Topologically Sorted Source Nodes: [relu_18], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf36, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.convolution] buf37 = extern_kernels.convolution(buf36, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf38 = buf37; del buf37 # reuse # Topologically Sorted Source Nodes: [out_17, relu_19], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf38, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution] buf39 = extern_kernels.convolution(buf38, primals_3, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf40 = buf39; del buf39 # reuse # Topologically Sorted Source Nodes: [relu_20], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf40, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution] buf41 = extern_kernels.convolution(buf40, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf41, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf42 = buf41; del buf41 # reuse # Topologically Sorted Source Nodes: [out_19, relu_21], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf42, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf43 = extern_kernels.convolution(buf42, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf44 = buf43; del buf43 # reuse # Topologically Sorted Source Nodes: [relu_22], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf44, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_20], Original ATen: [aten.convolution] buf45 = extern_kernels.convolution(buf44, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf46 = buf45; del buf45 # reuse # Topologically Sorted Source Nodes: [out_21, relu_23], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf46, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_23], Original ATen: [aten.convolution] buf47 = extern_kernels.convolution(buf46, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf48 = buf47; del buf47 # reuse # Topologically Sorted Source Nodes: [relu_24], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf48, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_22], Original ATen: [aten.convolution] buf49 = extern_kernels.convolution(buf48, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf49, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf50 = buf49; del buf49 # reuse # Topologically Sorted Source Nodes: [out_23, relu_25], Original ATen: [aten.add, aten.relu] triton_poi_fused_add_relu_2.run(buf50, buf2, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution] buf51 = extern_kernels.convolution(buf50, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf52 = buf51; del buf51 # reuse # Topologically Sorted Source Nodes: [relu_26], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf52, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [out_24], Original ATen: [aten.convolution] buf53 = 
extern_kernels.convolution(buf52, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf54 = buf53; del buf53  # reuse
        # Topologically Sorted Source Nodes: [out_25, relu_27], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf54, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_27], Original ATen: [aten.convolution]
        buf55 = extern_kernels.convolution(buf54, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf56 = buf55; del buf55  # reuse
        # Topologically Sorted Source Nodes: [relu_28], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf56, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_26], Original ATen: [aten.convolution]
        buf57 = extern_kernels.convolution(buf56, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf58 = buf57; del buf57  # reuse
        # Topologically Sorted Source Nodes: [out_27, relu_29], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf58, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_29], Original ATen: [aten.convolution]
        buf59 = extern_kernels.convolution(buf58, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf59, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf60 = buf59; del buf59  # reuse
        # Topologically Sorted Source Nodes: [relu_30], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf60, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_28], Original ATen: [aten.convolution]
        buf61 = extern_kernels.convolution(buf60, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf61, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf62 = buf61; del buf61  # reuse
        # Topologically Sorted Source Nodes: [out_29, relu_31], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf62, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_31], Original ATen: [aten.convolution]
        buf63 = extern_kernels.convolution(buf62, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf63, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf64 = buf63; del buf63  # reuse
        # Topologically Sorted Source Nodes: [relu_32], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf64, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_30], Original ATen: [aten.convolution]
        buf65 = extern_kernels.convolution(buf64, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf65, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf66 = buf65; del buf65  # reuse
        # Topologically Sorted Source Nodes: [out_31, relu_33], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf66, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_33], Original ATen: [aten.convolution]
        buf67 = extern_kernels.convolution(buf66, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf68 = buf67; del buf67  # reuse
        # Topologically Sorted Source Nodes: [relu_34], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf68, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_32], Original ATen: [aten.convolution]
        buf69 = extern_kernels.convolution(buf68, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf69, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf70 = buf69; del buf69  # reuse
        # Topologically Sorted Source Nodes: [out_33, relu_35], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf70, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_35], Original ATen: [aten.convolution]
        buf71 = extern_kernels.convolution(buf70, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf71, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf72 = buf71; del buf71  # reuse
        # Topologically Sorted Source Nodes: [relu_36], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf72, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution]
        buf73 = extern_kernels.convolution(buf72, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf73, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf74 = buf73; del buf73  # reuse
        # Topologically Sorted Source Nodes: [out_35, relu_37], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf74, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_37], Original ATen: [aten.convolution]
        buf75 = extern_kernels.convolution(buf74, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf75, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf76 = buf75; del buf75  # reuse
        # Topologically Sorted Source Nodes: [relu_38], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf76, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_36], Original ATen: [aten.convolution]
        buf77 = extern_kernels.convolution(buf76, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf77, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf78 = buf77; del buf77  # reuse
        # Topologically Sorted Source Nodes: [out_37, relu_39], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf78, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_39], Original ATen: [aten.convolution]
        buf79 = extern_kernels.convolution(buf78, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf79, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf80 = buf79; del buf79  # reuse
        # Topologically Sorted Source Nodes: [relu_40], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf80, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution]
        buf81 = extern_kernels.convolution(buf80, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf81, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf82 = buf81; del buf81  # reuse
        # Topologically Sorted Source Nodes: [out_39, relu_41], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf82, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_41], Original ATen: [aten.convolution]
        buf83 = extern_kernels.convolution(buf82, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf83, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf84 = buf83; del buf83  # reuse
        # Topologically Sorted Source Nodes: [relu_42], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf84, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_40], Original ATen: [aten.convolution]
        buf85 = extern_kernels.convolution(buf84, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf85, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf86 = buf85; del buf85  # reuse
        # Topologically Sorted Source Nodes: [out_41, relu_43], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf86, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_43], Original ATen: [aten.convolution]
        buf87 = extern_kernels.convolution(buf86, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf87, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf88 = buf87; del buf87  # reuse
        # Topologically Sorted Source Nodes: [relu_44], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf88, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_42], Original ATen: [aten.convolution]
        buf89 = extern_kernels.convolution(buf88, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf89, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf90 = buf89; del buf89  # reuse
        # Topologically Sorted Source Nodes: [out_43, relu_45], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf90, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_45], Original ATen: [aten.convolution]
        buf91 = extern_kernels.convolution(buf90, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf91, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf92 = buf91; del buf91  # reuse
        # Topologically Sorted Source Nodes: [relu_46], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf92, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_44], Original ATen: [aten.convolution]
        buf93 = extern_kernels.convolution(buf92, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf93, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf94 = buf93; del buf93  # reuse
        # Topologically Sorted Source Nodes: [out_45, relu_47], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf94, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_47], Original ATen: [aten.convolution]
        buf95 = extern_kernels.convolution(buf94, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf95, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf96 = buf95; del buf95  # reuse
        # Topologically Sorted Source Nodes: [relu_48], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf96, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
        buf97 = extern_kernels.convolution(buf96, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf97, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf98 = buf97; del buf97  # reuse
        # Topologically Sorted Source Nodes: [out_47, relu_49], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf98, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_49], Original ATen: [aten.convolution]
        buf99 = extern_kernels.convolution(buf98, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf99, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf100 = buf99; del buf99  # reuse
        # Topologically Sorted Source Nodes: [relu_50], Original ATen: [aten.relu]
        triton_poi_fused_relu_1.run(buf100, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_48], Original ATen: [aten.convolution]
        buf101 = extern_kernels.convolution(buf100, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf101, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf102 = buf101; del buf101  # reuse
        # Topologically Sorted Source Nodes: [out_49, relu_51], Original ATen: [aten.add, aten.relu]
        triton_poi_fused_add_relu_2.run(buf102, buf2, 2097152, grid=grid(2097152), stream=stream0)
        # Topologically Sorted Source Nodes: [out_50], Original ATen: [aten.convolution]
        buf103 = extern_kernels.convolution(buf102, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf103, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf104 = buf103; del buf103  # reuse
        # Topologically Sorted Source Nodes: [out_51], Original ATen: [aten.add]
        triton_poi_fused_add_3.run(buf104, buf0, primals_1, 16384, grid=grid(16384), stream=stream0)
        del primals_1
    return (buf104, primals_2, primals_3, primals_4, primals_5, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf38, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf62, buf64, buf66, buf68, buf70, buf72, buf74, buf76, buf78, buf80, buf82, buf84, buf86, buf88, buf90, buf92, buf94, buf96, buf98, buf100, buf102, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((128, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
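# Eager-mode references for the two fused pointwise kernels that dominate the
# schedule above (a readability sketch, not part of the generated module; the
# argument names follow the buffer convention used in call()):
import torch

def add_relu_reference(out, inputs):
    # triton_poi_fused_add_relu_2: relu(out + inputs), written back into `out`;
    # this is the tail of each recursive unit, out = relu(conv2(...) + inputs).
    return torch.relu(out + inputs)

def final_add_reference(out, relu_x, x):
    # triton_poi_fused_add_3: out + relu(x) for the global residual, plus a
    # copy of relu(x) back into x, mirroring nn.ReLU(inplace=True) having
    # mutated the original input in the eager model.
    x.copy_(relu_x)
    return out + relu_x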
import torch
import torch.nn as nn
from math import sqrt


class DRRN(nn.Module):

    def __init__(self):
        super(DRRN, self).__init__()
        self.input = nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv1 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)
        self.output = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, sqrt(2.0 / n))

    def forward(self, x):
        residual = x
        inputs = self.input(self.relu(x))
        out = inputs
        for _ in range(25):
            out = self.conv2(self.relu(self.conv1(self.relu(out))))
            out = torch.add(out, inputs)
        out = self.output(self.relu(out))
        out = torch.add(out, residual)
        return out


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
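# A minimal smoke test for the module above (not part of the original repo):
# the 25 recursive units reuse conv1/conv2, so depth grows without adding
# parameters, and the output keeps the input's spatial shape. The input is
# cloned because nn.ReLU(inplace=True) mutates it.
if __name__ == '__main__':
    model = DRRN()
    x = get_inputs()[0]
    y = model(x.clone())
    assert y.shape == x.shape  # (4, 1, 64, 64)
    print('params:', sum(p.numel() for p in model.parameters()))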
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, None)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(in_out_ptr0 + x0, tmp2, None)


@triton.jit
def triton_poi_fused_add_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x0, tmp4, None)


@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)
    tl.store(out_ptr0 + x0, tmp1, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_2, (128, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_5, (1, 128, 3, 3), (1152, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(16384)](primals_1, buf0, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(2097152)](buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(2097152)](buf4, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf6 = buf5
        del buf5
        triton_poi_fused_add_relu_2[grid(2097152)](buf6, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf7 = extern_kernels.convolution(buf6, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_1[grid(2097152)](buf8, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf9 = extern_kernels.convolution(buf8, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf10 = buf9
        del buf9
        triton_poi_fused_add_relu_2[grid(2097152)](buf10, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf11 = extern_kernels.convolution(buf10, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf12 = buf11
        del buf11
        triton_poi_fused_relu_1[grid(2097152)](buf12, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf14 = buf13
        del buf13
        triton_poi_fused_add_relu_2[grid(2097152)](buf14, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf15 = extern_kernels.convolution(buf14, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf16 = buf15
        del buf15
        triton_poi_fused_relu_1[grid(2097152)](buf16, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf17 = extern_kernels.convolution(buf16, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf18 = buf17
        del buf17
        triton_poi_fused_add_relu_2[grid(2097152)](buf18, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf19 = extern_kernels.convolution(buf18, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf20 = buf19
        del buf19
        triton_poi_fused_relu_1[grid(2097152)](buf20, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf21 = extern_kernels.convolution(buf20, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf22 = buf21
        del buf21
        triton_poi_fused_add_relu_2[grid(2097152)](buf22, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf23 = extern_kernels.convolution(buf22, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf24 = buf23
        del buf23
        triton_poi_fused_relu_1[grid(2097152)](buf24, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf25 = extern_kernels.convolution(buf24, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf26 = buf25
        del buf25
        triton_poi_fused_add_relu_2[grid(2097152)](buf26, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf27 = extern_kernels.convolution(buf26, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf28 = buf27
        del buf27
        triton_poi_fused_relu_1[grid(2097152)](buf28, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf29 = extern_kernels.convolution(buf28, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf30 = buf29
        del buf29
        triton_poi_fused_add_relu_2[grid(2097152)](buf30, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf31 = extern_kernels.convolution(buf30, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf32 = buf31
        del buf31
        triton_poi_fused_relu_1[grid(2097152)](buf32, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf33 = extern_kernels.convolution(buf32, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf33, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf34 = buf33
        del buf33
        triton_poi_fused_add_relu_2[grid(2097152)](buf34, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf35 = extern_kernels.convolution(buf34, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf36 = buf35
        del buf35
        triton_poi_fused_relu_1[grid(2097152)](buf36, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf37 = extern_kernels.convolution(buf36, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf38 = buf37
        del buf37
        triton_poi_fused_add_relu_2[grid(2097152)](buf38, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf39 = extern_kernels.convolution(buf38, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf40 = buf39
        del buf39
        triton_poi_fused_relu_1[grid(2097152)](buf40, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf41 = extern_kernels.convolution(buf40, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf41, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf42 = buf41
        del buf41
        triton_poi_fused_add_relu_2[grid(2097152)](buf42, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf43 = extern_kernels.convolution(buf42, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf44 = buf43
        del buf43
        triton_poi_fused_relu_1[grid(2097152)](buf44, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf45 = extern_kernels.convolution(buf44, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf46 = buf45
        del buf45
        triton_poi_fused_add_relu_2[grid(2097152)](buf46, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf47 = extern_kernels.convolution(buf46, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf48 = buf47
        del buf47
        triton_poi_fused_relu_1[grid(2097152)](buf48, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf49 = extern_kernels.convolution(buf48, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf49, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf50 = buf49
        del buf49
        triton_poi_fused_add_relu_2[grid(2097152)](buf50, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf51 = extern_kernels.convolution(buf50, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf52 = buf51
        del buf51
        triton_poi_fused_relu_1[grid(2097152)](buf52, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf53 = extern_kernels.convolution(buf52, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf54 = buf53
        del buf53
        triton_poi_fused_add_relu_2[grid(2097152)](buf54, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf55 = extern_kernels.convolution(buf54, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf56 = buf55
        del buf55
        triton_poi_fused_relu_1[grid(2097152)](buf56, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf57 = extern_kernels.convolution(buf56, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf58 = buf57
        del buf57
        triton_poi_fused_add_relu_2[grid(2097152)](buf58, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf59 = extern_kernels.convolution(buf58, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf59, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf60 = buf59
        del buf59
        triton_poi_fused_relu_1[grid(2097152)](buf60, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf61 = extern_kernels.convolution(buf60, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf61, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf62 = buf61
        del buf61
        triton_poi_fused_add_relu_2[grid(2097152)](buf62, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf63 = extern_kernels.convolution(buf62, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf63, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf64 = buf63
        del buf63
        triton_poi_fused_relu_1[grid(2097152)](buf64, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf65 = extern_kernels.convolution(buf64, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf65, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf66 = buf65
        del buf65
        triton_poi_fused_add_relu_2[grid(2097152)](buf66, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf67 = extern_kernels.convolution(buf66, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf68 = buf67
        del buf67
        triton_poi_fused_relu_1[grid(2097152)](buf68, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf69 = extern_kernels.convolution(buf68, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf69, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf70 = buf69
        del buf69
        triton_poi_fused_add_relu_2[grid(2097152)](buf70, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf71 = extern_kernels.convolution(buf70, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf71, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf72 = buf71
        del buf71
        triton_poi_fused_relu_1[grid(2097152)](buf72, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf73 = extern_kernels.convolution(buf72, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf73, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf74 = buf73
        del buf73
        triton_poi_fused_add_relu_2[grid(2097152)](buf74, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf75 = extern_kernels.convolution(buf74, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf75, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf76 = buf75
        del buf75
        triton_poi_fused_relu_1[grid(2097152)](buf76, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf77 = extern_kernels.convolution(buf76, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf77, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf78 = buf77
        del buf77
        triton_poi_fused_add_relu_2[grid(2097152)](buf78, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf79 = extern_kernels.convolution(buf78, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf79, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf80 = buf79
        del buf79
        triton_poi_fused_relu_1[grid(2097152)](buf80, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf81 = extern_kernels.convolution(buf80, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf81, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf82 = buf81
        del buf81
        triton_poi_fused_add_relu_2[grid(2097152)](buf82, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf83 = extern_kernels.convolution(buf82, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf83, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf84 = buf83
        del buf83
        triton_poi_fused_relu_1[grid(2097152)](buf84, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf85 = extern_kernels.convolution(buf84, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf85, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf86 = buf85
        del buf85
        triton_poi_fused_add_relu_2[grid(2097152)](buf86, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf87 = extern_kernels.convolution(buf86, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf87, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf88 = buf87
        del buf87
        triton_poi_fused_relu_1[grid(2097152)](buf88, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf89 = extern_kernels.convolution(buf88, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf89, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf90 = buf89
        del buf89
        triton_poi_fused_add_relu_2[grid(2097152)](buf90, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf91 = extern_kernels.convolution(buf90, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf91, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf92 = buf91
        del buf91
        triton_poi_fused_relu_1[grid(2097152)](buf92, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf93 = extern_kernels.convolution(buf92, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf93, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf94 = buf93
        del buf93
        triton_poi_fused_add_relu_2[grid(2097152)](buf94, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf95 = extern_kernels.convolution(buf94, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf95, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf96 = buf95
        del buf95
        triton_poi_fused_relu_1[grid(2097152)](buf96, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf97 = extern_kernels.convolution(buf96, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf97, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf98 = buf97
        del buf97
        triton_poi_fused_add_relu_2[grid(2097152)](buf98, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf99 = extern_kernels.convolution(buf98, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf99, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf100 = buf99
        del buf99
        triton_poi_fused_relu_1[grid(2097152)](buf100, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf101 = extern_kernels.convolution(buf100, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf101, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf102 = buf101
        del buf101
        triton_poi_fused_add_relu_2[grid(2097152)](buf102, buf2, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
        buf103 = extern_kernels.convolution(buf102, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf103, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf104 = buf103
        del buf103
        triton_poi_fused_add_3[grid(16384)](buf104, buf0, primals_1, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
    return (buf104, primals_2, primals_3, primals_4, primals_5, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf38, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf62, buf64, buf66, buf68, buf70, buf72, buf74, buf76, buf78, buf80, buf82, buf84, buf86, buf88, buf90, buf92, buf94, buf96, buf98, buf100, buf102)


class DRRNNew(nn.Module):

    def __init__(self):
        super(DRRNNew, self).__init__()
        self.input = nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv1 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)
        self.output = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, sqrt(2.0 / n))

    def forward(self, input_0):
        primals_2 = self.input.weight
        primals_3 = self.conv1.weight
        primals_4 = self.conv2.weight
        primals_5 = self.output.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
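# A CUDA smoke test for the compiled wrapper (a sketch under the assumption
# that cuda:0 is available; not part of the original entry). `call` also
# writes the relu'd input back into primals_1 -- the functionalized form of
# the eager model's in-place ReLU -- so the input is cloned first.
def _smoke_test():
    model = DRRNNew().cuda()
    x = torch.rand(4, 1, 64, 64, device='cuda')
    y = model(x.clone())
    assert y.shape == (4, 1, 64, 64)
    return y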
loyo1990/DRRN-pytorch
DRRN
false
10,425
[ "MIT" ]
0
63d7dfd4c6bcb4f7b668fc2f5b4e2031cbba6619
https://github.com/loyo1990/DRRN-pytorch/tree/63d7dfd4c6bcb4f7b668fc2f5b4e2031cbba6619
UpSampleX2
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/je/cjeyximapx6ybmapc54z7zc3bjcfcpkpctbbfywktcuco4gohaj7.py
# Topologically Sorted Source Nodes: [out, prelu], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
#   out => convolution
#   prelu => gt, mul, where
# Graph fragment:
#   %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 49) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (0))
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
        buf1 = buf0; del buf0  # reuse
        buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out, prelu], Original ATen: [aten.convolution, aten._prelu_kernel]
        stream0 = get_raw_stream(0)
        triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 784, grid=grid(784), stream=stream0)
        del primals_2
    return (buf2, primals_1, primals_3, primals_4, buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
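# An eager-mode equivalent of the fused kernel above (a reference sketch, not
# generated code): cuDNN performs the transposed convolution, and the Triton
# kernel then adds the per-channel bias and applies single-weight PReLU in one
# pass, storing both the biased pre-activation (in_out_ptr0) and the
# activation (out_ptr0).
import torch

def bias_prelu_reference(conv_out, bias, prelu_weight):
    pre = conv_out + bias.view(1, -1, 1, 1)              # tmp2 = tmp0 + tmp1
    act = torch.where(pre > 0, pre, prelu_weight * pre)  # tmp4 / tmp7 / tmp8
    return pre, act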
import torch
from torchvision.transforms import *


class DeconvBlock(torch.nn.Module):

    def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
        super(DeconvBlock, self).__init__()
        self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        if self.norm is not None:
            out = self.bn(self.deconv(x))
        else:
            out = self.deconv(x)
        if self.activation is not None:
            return self.act(out)
        else:
            return out


class UpSampleX2(torch.nn.Module):

    def __init__(self, num_filter, kernel_size=3, stride=2, padding=1, bias=True, activation='prelu', norm=None):
        super(UpSampleX2, self).__init__()
        # Pass activation by keyword: in the flattened source it was passed
        # positionally and landed in DeconvBlock's `bias` slot (which only
        # worked because a non-empty string is truthy and 'prelu' is also the
        # activation default).
        self.down_conv = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)

    def forward(self, x):
        return self.down_conv(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_filter': 4}]
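# A quick check of the transposed-convolution geometry (a sketch, not part of
# the original file): H_out = (H_in - 1) * stride - 2 * padding + kernel_size,
# so the 4x4 input with stride 2, padding 1, kernel 3 gives (4-1)*2 - 2 + 3 = 7,
# matching the (4, 4, 7, 7) buffer asserted in the generated code.
if __name__ == '__main__':
    m = UpSampleX2(num_filter=4)
    y = m(get_inputs()[0])
    assert y.shape == (4, 4, 7, 7)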
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision.transforms import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 49 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__prelu_kernel_convolution_0[grid(784)](buf1, primals_2, primals_4, buf2, 784, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf2, primals_1, primals_3, primals_4, buf1


class DeconvBlock(torch.nn.Module):

    def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
        super(DeconvBlock, self).__init__()
        self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        if self.norm is not None:
            out = self.bn(self.deconv(x))
        else:
            out = self.deconv(x)
        if self.activation is not None:
            return self.act(out)
        else:
            return out


class UpSampleX2New(torch.nn.Module):

    def __init__(self, num_filter, kernel_size=3, stride=2, padding=1, bias=True, activation='prelu', norm=None):
        super(UpSampleX2New, self).__init__()
        # Keyword arguments keep `activation` out of DeconvBlock's `bias` slot
        # (the flattened source passed it positionally).
        self.down_conv = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)

    def forward(self, input_0):
        primals_1 = self.down_conv.deconv.weight
        primals_2 = self.down_conv.deconv.bias
        primals_4 = self.down_conv.act.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
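# A hedged CUDA sanity check (assumes cuda:0 is available; not part of the
# original entry): the compiled wrapper reuses deconv.weight/bias and the
# PReLU weight, so it should track the eager DeconvBlock path to within
# floating-point tolerance.
def _check_against_eager():
    m = UpSampleX2New(num_filter=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = m(x)
    ref = m.down_conv(x)  # eager path through the same parameters
    assert torch.allclose(y, ref, atol=1e-5)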
lizatish/My_CNN
UpSampleX2
false
10,426
[ "MIT" ]
0
b13818bcce2f8a3697d20e34157e3dce53f953ee
https://github.com/lizatish/My_CNN/tree/b13818bcce2f8a3697d20e34157e3dce53f953ee
InteractingLayer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/ak/caklf2pyfxcrlk24jikpqfulearftlggift3k32sdo56rqpygtef.py
# Topologically Sorted Source Nodes: [querys_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
#   querys_1 => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 8)
    x0 = xindex % 2
    x1 = (xindex // 2) % 4
    x3 = xindex
    tmp0 = x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*((-4) + x2))), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/d7/cd7xa5d4yg5y7exr6s4sr25rd6okj4v7452l7cyhxnqr3mcd4qhj.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/kx/ckxzcazhsdasvh5sdcvshdrxriufwxfrn25tt7nuks5deb2u6ei5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/ee/ceebt7dp2dnq4qecrxhaoiakzivwsb6pug54s5t7st6c2qpbsii7.py
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   result_4 => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_1,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp11 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 4, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr0 + (32 + (2*x1) + ((-2) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tmp12 = tmp10 + tmp11
    tmp13 = tl.full([1], 0, tl.int32)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp15 = 0.0
    tmp16 = tmp14 <= tmp15
    tl.store(in_out_ptr0 + (x2), tmp14, xmask)
    tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [querys], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [keys], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [values], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, out=buf2)
        del primals_4
        buf3 = empty_strided_cuda((8, 4, 2), (8, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [querys_1], Original ATen: [aten.stack]
        stream0 = get_raw_stream(0)
        triton_poi_fused_stack_0.run(buf0, buf3, 64, grid=grid(64), stream=stream0)
        buf4 = reinterpret_tensor(buf0, (8, 4, 2), (8, 2, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [keys_1], Original ATen: [aten.stack]
        triton_poi_fused_stack_0.run(buf1, buf4, 64, grid=grid(64), stream=stream0)
        buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [inner_product], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 2, 4), (8, 1, 2), 0), out=buf5)
        buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf5, buf6, 128, grid=grid(128), stream=stream0)
        buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0); del buf5  # reuse
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf6, buf7, 128, grid=grid(128), stream=stream0)
        del buf6
        buf8 = reinterpret_tensor(buf1, (8, 4, 2), (8, 2, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [values_1], Original ATen: [aten.stack]
        triton_poi_fused_stack_0.run(buf2, buf8, 64, grid=grid(64), stream=stream0)
        buf9 = reinterpret_tensor(buf2, (8, 4, 2), (8, 2, 1), 0); del buf2  # reuse
        # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), buf8, out=buf9)
        buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [tensordot_3], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10)
        del primals_5
        buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10  # reuse
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_3.run(buf11, buf9, buf12, 64, grid=grid(64), stream=stream0)
        del buf9
    return (buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf8, (8, 2, 4), (8, 1, 2), 0), reinterpret_tensor(buf3, (8, 2, 4), (8, 1, 2), 0), buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
import torch.onnx
import torch as torch


class InteractingLayer(nn.Module):
    """A Layer used in AutoInt that models the correlations between different feature fields by a
    multi-head self-attention mechanism.
      Input shape
            - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
      Output shape
            - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
      Arguments
            - **embedding_size**: Positive integer, dimensionality of input features.
            - **head_num**: int. The head number in the multi-head self-attention network.
            - **use_res**: bool. Whether or not to use standard residual connections before output.
            - **seed**: A Python integer to use as random seed.
      References
            - [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via
              Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
    """

    def __init__(self, embedding_size, head_num=2, use_res=True, scaling=False, seed=1024, device='cpu'):
        super(InteractingLayer, self).__init__()
        if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
        if embedding_size % head_num != 0:
            raise ValueError('embedding_size is not an integer multiple of head_num!')
        self.att_embedding_size = embedding_size // head_num
        self.head_num = head_num
        self.use_res = use_res
        self.scaling = scaling
        self.seed = seed
        self.W_Query = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        self.W_key = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        self.W_Value = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        if self.use_res:
            self.W_Res = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        for tensor in self.parameters():
            nn.init.normal_(tensor, mean=0.0, std=0.05)

    def forward(self, inputs):
        if len(inputs.shape) != 3:
            raise ValueError('Unexpected inputs dimensions %d, expect to be 3 dimensions' % len(inputs.shape))
        querys = torch.tensordot(inputs, self.W_Query, dims=([-1], [0]))
        keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
        values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
        querys = torch.stack(torch.split(querys, self.att_embedding_size, dim=2))
        keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
        values = torch.stack(torch.split(values, self.att_embedding_size, dim=2))
        inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
        if self.scaling:
            inner_product /= self.att_embedding_size ** 0.5
        self.normalized_att_scores = F.softmax(inner_product, dim=-1)
        result = torch.matmul(self.normalized_att_scores, values)
        result = torch.cat(torch.split(result, 1), dim=-1)
        result = torch.squeeze(result, dim=0)
        if self.use_res:
            result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
        result = F.relu(result)
        return result


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'embedding_size': 4}]
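A minimal usage sketch of the layer above, using the toy dimensions returned by get_init_inputs()/get_inputs(); the shapes are illustrative, not requirements of AutoInt.

import torch

layer = InteractingLayer(embedding_size=4, head_num=2, use_res=True)
inputs = torch.rand(4, 4, 4)  # (batch_size, field_size, embedding_size)
out = layer(inputs)
print(out.shape)  # torch.Size([4, 4, 4]): field embeddings keep their shape
# The attention weights are cached on the module after each forward pass:
print(layer.normalized_att_scores.shape)  # (head_num, batch_size, field_size, field_size)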
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from sklearn.metrics import * import torch.onnx import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 8 x0 = xindex % 2 x1 = xindex // 2 % 4 x3 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * (-4 + x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp11 = tl.load(in_out_ptr0 + x2, xmask) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr0 + (32 + 2 * x1 + (-2 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp12 = tmp10 + tmp11 tmp13 = tl.full([1], 0, tl.int32) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp15 
= 0.0 tmp16 = tmp14 <= tmp15 tl.store(in_out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((8, 4, 2), (8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(64)](buf0, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf0, (8, 4, 2), (8, 2, 1), 0) del buf0 triton_poi_fused_stack_0[grid(64)](buf1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 2, 4), (8, 1, 2), 0), out=buf5) buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(128)](buf6, buf7, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (8, 4, 2), (8, 2, 1), 0) del buf1 triton_poi_fused_stack_0[grid(64)](buf2, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf2, (8, 4, 2), (8, 2, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), buf8, out=buf9) buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10) del primals_5 buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(64)](buf11, buf9, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf9 return buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf8, (8, 2, 4), (8, 1, 2), 0 ), reinterpret_tensor(buf3, (8, 2, 4), (8, 1, 2), 0), buf4 class InteractingLayerNew(nn.Module): """A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism. Input shape - A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 3D tensor with shape:``(batch_size,field_size,embedding_size)``. Arguments - **in_features** : Positive integer, dimensionality of input features. - **head_num**: int.The head number in multi-head self-attention network. - **use_res**: bool.Whether or not use standard residual connections before output. - **seed**: A Python integer to use as random seed. References - [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. 
arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
    """

    def __init__(self, embedding_size, head_num=2, use_res=True, scaling=False, seed=1024, device='cpu'):
        super(InteractingLayerNew, self).__init__()
        if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
        if embedding_size % head_num != 0:
            raise ValueError('embedding_size is not an integer multiple of head_num!')
        self.att_embedding_size = embedding_size // head_num
        self.head_num = head_num
        self.use_res = use_res
        self.scaling = scaling
        self.seed = seed
        self.W_Query = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        self.W_key = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        self.W_Value = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        if self.use_res:
            self.W_Res = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
        for tensor in self.parameters():
            nn.init.normal_(tensor, mean=0.0, std=0.05)

    def forward(self, input_0):
        primals_2 = self.W_Query
        primals_3 = self.W_key
        primals_4 = self.W_Value
        primals_5 = self.W_Res
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
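As a quick sanity check, the compiled wrapper can be compared against the eager layer above. This is a minimal sketch, assuming a CUDA device and the toy sizes hard-coded into the compiled graph's assert_size_stride guards (embedding_size=4, input shape (4, 4, 4)); it is not part of the original repo.

import torch

torch.manual_seed(0)
eager = InteractingLayer(embedding_size=4, head_num=2, use_res=True).cuda()
fast = InteractingLayerNew(embedding_size=4, head_num=2, use_res=True).cuda()
fast.load_state_dict(eager.state_dict())  # share the randomly initialized weights
x = torch.rand(4, 4, 4, device='cuda')    # must match the (4, 4, 4) guard in call()
torch.testing.assert_close(fast(x), eager(x), rtol=1e-4, atol=1e-5)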
dulvqingyunLT/DeepCTR-Torch
InteractingLayer
false
10427
[ "Apache-2.0" ]
0
f40cf08f3469aa471f9ca69e44c5de51180341cc
https://github.com/dulvqingyunLT/DeepCTR-Torch/tree/f40cf08f3469aa471f9ca69e44c5de51180341cc
CriticMlp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/zb/czbrdc6746xv7kfxrqkzgbhm74ijdfuyfd3sz3llzzwzm6wzxmfi.py # Topologically Sorted Source Nodes: [global_obs_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # global_obs_1 => relu # Graph fragment: # %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + 
(x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xc/cxcbskmvfetvwy5kto3hmjtnvu347mijo2mhvgsdlafetsnubgud.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_2, %relu_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*(x1 // 4)) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 8, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr3 + ((-4) + x0), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/se/csemwzmaiwyhjjvzkbk56prr5otscb72tpb7v2t622v5ck5zy3s4.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node 
mapping: # x_1 => relu_3 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6y/c6yl4uv4syttx5p2rvialnpvw2b2afygk6u4wtiv3hxyrze6hevt.py # Topologically Sorted Source Nodes: [local_obs_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # local_obs_1 => relu_2 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ac/cac6aispvtf6fyinzk7ei7ozijq6adhgepxz7l4mnfner74j5yma.py # Topologically Sorted Source Nodes: [global_obs_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # global_obs_2 => relu_1 # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 16), (16, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [global_obs_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf2, primals_5, buf3, primals_7, buf4, 512, grid=grid(512), stream=stream0) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), out=buf5) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf6, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf8, primals_11, 256, grid=grid(256), stream=stream0) del primals_11 buf9 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf8, reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_13 buf10 = empty_strided_cuda((64, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [local_obs_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf3, primals_7, buf10, 256, grid=grid(256), stream=stream0) del buf3 del primals_7 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [global_obs_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_4.run(buf2, primals_5, buf11, 64, grid=grid(64), stream=stream0) del buf2 del primals_5 return (reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), primals_1, buf1, buf4, buf6, buf8, primals_12, primals_10, primals_8, buf10, buf11, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


def init_weights(layer, gain):
    for p in layer.parameters():
        if len(p.data.shape) >= 2:
            nn.init.orthogonal_(p, gain=gain)
        else:
            p.data.zero_()


def all_init_weights(m, gain=2 ** 0.5):
    init_weights(m, gain)


class CriticMlp(nn.Module):

    def __init__(self, obs_size, n_agent, n_action, global_encode_size,
                 local_encode_size, fc1_size, fc2_size):
        super(CriticMlp, self).__init__()
        self.obs_size = obs_size
        self.n_agent = n_agent
        self.n_action = n_action
        self.global_encode_fc1 = nn.Linear(obs_size * n_agent, global_encode_size)
        self.global_encode_fc2 = nn.Linear(global_encode_size, global_encode_size)
        self.local_encode_fc = nn.Linear(obs_size, local_encode_size)
        self.fc1 = nn.Linear(global_encode_size + local_encode_size, fc1_size)
        self.fc2 = nn.Linear(fc1_size, fc2_size)
        self.fc3 = nn.Linear(fc2_size, n_action)
        self.apply(all_init_weights)
        init_weights(self.fc3, gain=1)

    def forward(self, obs_j):
        global_obs = obs_j.view(-1, self.obs_size * self.n_agent)
        global_obs = F.relu(self.global_encode_fc1(global_obs))
        global_obs = F.relu(self.global_encode_fc2(global_obs))
        local_obs = obs_j.view(-1, self.obs_size)
        local_obs = F.relu(self.local_encode_fc(local_obs))
        global_obs = global_obs.repeat_interleave(self.n_agent, dim=0)
        x = torch.cat((global_obs, local_obs), dim=1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        q = self.fc3(x)
        q = q.view(-1, self.n_agent, self.n_action)
        return q


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'obs_size': 4, 'n_agent': 4, 'n_action': 4,
        'global_encode_size': 4, 'local_encode_size': 4, 'fc1_size': 4,
        'fc2_size': 4}]
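A short usage sketch for the critic above, with the toy sizes from get_init_inputs(); note that the extra leading dimension of obs_j is simply folded away by the view() calls.

import torch

critic = CriticMlp(obs_size=4, n_agent=4, n_action=4, global_encode_size=4,
                   local_encode_size=4, fc1_size=4, fc2_size=4)
obs_j = torch.rand(4, 4, 4, 4)
q = critic(obs_j)
print(q.shape)  # torch.Size([16, 4, 4]) == (-1, n_agent, n_action)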
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr3 + (-4 + x0), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = 
tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 16), (16, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8 ), 0), out=buf5) buf6 = buf5 del buf5 triton_poi_fused_relu_2[grid(256)](buf6, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_2[grid(256)](buf8, primals_11, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_11 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf8, reinterpret_tensor( primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_13 buf10 = empty_strided_cuda((64, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(256)](buf3, primals_7, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_7 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(64)](buf2, primals_5, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del primals_5 return (reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), primals_1, buf1, buf4, buf6, buf8, primals_12, primals_10, primals_8, buf10, buf11, primals_4) def init_weights(layer, gain): for p in layer.parameters(): if len(p.data.shape) >= 2: nn.init.orthogonal_(p, gain=gain) else: p.data.zero_() def all_init_weights(m, gain=2 ** 0.5): init_weights(m, gain) class CriticMlpNew(nn.Module): def __init__(self, obs_size, n_agent, n_action, global_encode_size, local_encode_size, fc1_size, fc2_size): super(CriticMlpNew, self).__init__() self.obs_size = 
obs_size self.n_agent = n_agent self.n_action = n_action self.global_encode_fc1 = nn.Linear(obs_size * n_agent, global_encode_size) self.global_encode_fc2 = nn.Linear(global_encode_size, global_encode_size) self.local_encode_fc = nn.Linear(obs_size, local_encode_size) self.fc1 = nn.Linear(global_encode_size + local_encode_size, fc1_size) self.fc2 = nn.Linear(fc1_size, fc2_size) self.fc3 = nn.Linear(fc2_size, n_action) self.apply(all_init_weights) init_weights(self.fc3, gain=1) def forward(self, input_0): primals_2 = self.global_encode_fc1.weight primals_3 = self.global_encode_fc1.bias primals_4 = self.global_encode_fc2.weight primals_5 = self.global_encode_fc2.bias primals_6 = self.local_encode_fc.weight primals_7 = self.local_encode_fc.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_12 = self.fc3.weight primals_13 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
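A minimal parity sketch between the eager and compiled critics, assuming a CUDA device and the (4, 4, 4, 4) input shape asserted inside call(); it is not part of the original repo.

import torch

torch.manual_seed(0)
eager = CriticMlp(obs_size=4, n_agent=4, n_action=4, global_encode_size=4,
                  local_encode_size=4, fc1_size=4, fc2_size=4).cuda()
fast = CriticMlpNew(obs_size=4, n_agent=4, n_action=4, global_encode_size=4,
                    local_encode_size=4, fc1_size=4, fc2_size=4).cuda()
fast.load_state_dict(eager.state_dict())
obs_j = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fast(obs_j), eager(obs_j), rtol=1e-4, atol=1e-5)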
heavenlysf/thesis
CriticMlp
false
10428
[ "MIT" ]
0
646553c45860f337c91a48ab7f666a174784472f
https://github.com/heavenlysf/thesis/tree/646553c45860f337c91a48ab7f666a174784472f
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nu/cnuyiboekdklervsd4zozxy6hj5ypmcigwk6x427x6ylotbbcr5k.py # Topologically Sorted Source Nodes: [mean, std, sub, add, x, mul, x_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # std => var # sub => sub # x => div # x_1 => add_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %view_1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_5), kwargs = {}) triton_per_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_std_sub_0', 
'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp28 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-08 tmp25 = tmp23 + tmp24 tmp26 = tmp0 - tmp20 tmp27 = tmp26 / tmp25 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp25, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = buf0; del buf0 # reuse buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf3 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, x, mul, x_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_std_sub_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 return (buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import Parameter


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-08, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = Parameter(torch.Tensor(num_features).uniform_())
            self.beta = Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            mean = x.view(-1).mean().view(*shape)
            std = x.view(-1).std().view(*shape)
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
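Worth noting: unlike torch.nn.LayerNorm, this module normalizes each sample over all of its non-batch elements and uses the unbiased (N-1) standard deviation, which is why the fused kernel above divides by 63.0 for a 64-element sample. A minimal check of that math, assuming the default affine=True:

import torch

ln = LayerNorm(num_features=4)
x = torch.rand(4, 4, 4, 4)
flat = x.view(x.size(0), -1)
mean = flat.mean(1).view(-1, 1, 1, 1)
std = flat.std(1).view(-1, 1, 1, 1)  # unbiased: divides by N - 1 = 63
ref = (x - mean) / (std + ln.eps) * ln.gamma.view(1, -1, 1, 1) + ln.beta.view(1, -1, 1, 1)
torch.testing.assert_close(ln(x), ref)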
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp28 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-08 tmp25 = tmp23 + tmp24 tmp26 = tmp0 - tmp20 tmp27 = tmp26 / tmp25 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5 class LayerNormNew(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNormNew, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
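A corresponding smoke test for the compiled wrapper: a sketch, assuming a CUDA device and the (4, 4, 4, 4) input shape asserted in call().

import torch

eager = LayerNorm(num_features=4).cuda()
fast = LayerNormNew(num_features=4).cuda()
fast.load_state_dict(eager.state_dict())  # copy gamma and beta
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fast(x), eager(x), rtol=1e-5, atol=1e-6)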
kangzhiq/DeepFillv2_Pytorch
LayerNorm
false
10429
[ "MIT" ]
0
9c7ed61b25bb995713f89108b712490737abe1b1
https://github.com/kangzhiq/DeepFillv2_Pytorch/tree/9c7ed61b25bb995713f89108b712490737abe1b1
SAB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wb/cwbnzhah456ssqf5k4ob4llmezbi7o6givtx6ppibkklzj3kmilo.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + (16*y0)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = 
{}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wl/cwll6lomrruls6u4c3ua52p4g5jvoxwf6bvyj4dolvf23rk3zogz.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5i/c5ipggcwfmiy6xwyorkqi45ymmmjaizazawvwrlqypsmysj65d6x.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] # Source node to ATen node mapping: # h_1 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %primals_1), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3j/c3jlveurtxkoy4v4xyl2fgccp7ieaeliub6jaslea3rhtsnp7cxd.py # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # h_2 => add_1 # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_23,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), 
(4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 4, 16, grid=grid(4, 16), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 4, 16, grid=grid(4, 16), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_7, buf8, 4, 16, grid=grid(4, 16), stream=stream0) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [o], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf12, primals_9, primals_1, 64, grid=grid(64), stream=stream0) del primals_9 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf12, 
(16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_5.run(buf12, buf13, primals_11, buf14, buf15, 64, grid=grid(64), stream=stream0) del buf13 del primals_11 return (buf14, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf15, primals_10, primals_8, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
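# Illustrative sketch (assumption: stock PyTorch only; this is not part of the
# Inductor output above). The two _softmax kernels split the numerically
# stable softmax over the last axis of the [16, 4, 4] routing tensor into two
# passes; a minimal eager-mode equivalent:
import torch

def two_pass_softmax(routing: torch.Tensor) -> torch.Tensor:
    # Pass 1 (triton_poi_fused__softmax_1): subtract the per-row max, exponentiate.
    exp = (routing - routing.amax(dim=-1, keepdim=True)).exp()
    # Pass 2 (triton_poi_fused__softmax_2): normalize by the per-row sum.
    return exp / exp.sum(dim=-1, keepdim=True)

_x = torch.randn(16, 4, 4)
assert torch.allclose(two_pass_softmax(_x), torch.softmax(_x, dim=-1), atol=1e-6)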
import math import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h class SAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.mab = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) def forward(self, x, presence=None): return self.mab(x, x, presence) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'n_heads': 4}]
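# Minimal usage sketch (illustrative, not from the source repo): instantiating
# the eager SAB block with the shapes produced by get_inputs()/get_init_inputs()
# above.
import torch

sab = SAB(d=4, n_heads=4)      # self-attention block defined above
x = torch.rand(4, 4, 4)        # [B, N, d], as in get_inputs()
out = sab(x)                   # forwards to MAB(x, x): queries == keys == x
print(out.shape)               # torch.Size([4, 4, 4])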
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](buf0, primals_3, buf3, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(4, 16)](buf1, primals_5, buf4, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(4, 16)](buf2, primals_7, buf8, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_add_4[grid(64)](buf12, primals_9, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_5[grid(64)](buf12, buf13, primals_11, buf14, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del primals_11 return buf14, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), buf15, primals_10, primals_8, reinterpret_tensor(buf8, (16, 1, 4 ), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. 
Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h class SABNew(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.mab = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) def forward(self, input_0): primals_2 = self.mab.mqkv.q_projector.weight primals_3 = self.mab.mqkv.q_projector.bias primals_4 = self.mab.mqkv.k_projector.weight primals_5 = self.mab.mqkv.k_projector.bias primals_6 = self.mab.mqkv.v_projector.weight primals_7 = self.mab.mqkv.v_projector.bias primals_8 = self.mab.mqkv.o_projector.weight primals_9 = self.mab.mqkv.o_projector.bias primals_10 = self.mab.fc.weight primals_11 = self.mab.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
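# Hedged parity check (assumptions: a CUDA device is available, since call()
# is CUDA-only, and SAB refers to the eager definition earlier in this record).
# Because SABNew keeps the same submodule layout as SAB, weights can be copied
# directly and the compiled and eager outputs compared.
import torch

eager = SAB(d=4, n_heads=4).cuda()          # layer_norm=False matches the traced graph
compiled = SABNew(d=4, n_heads=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter names/shapes

x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)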
karayanni/torch-scae
SAB
false
10430
[ "Apache-2.0" ]
0
e044662d8942d8d1923d13d071f375144cf4a1e8
https://github.com/karayanni/torch-scae/tree/e044662d8942d8d1923d13d071f375144cf4a1e8
MAB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wb/cwbnzhah456ssqf5k4ob4llmezbi7o6givtx6ppibkklzj3kmilo.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + (16*y0)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = 
{}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wl/cwll6lomrruls6u4c3ua52p4g5jvoxwf6bvyj4dolvf23rk3zogz.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5i/c5ipggcwfmiy6xwyorkqi45ymmmjaizazawvwrlqypsmysj65d6x.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] # Source node to ATen node mapping: # h_1 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %primals_1), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3j/c3jlveurtxkoy4v4xyl2fgccp7ieaeliub6jaslea3rhtsnp7cxd.py # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # h_2 => add_1 # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_23,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) 
assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_4, buf3, 4, 16, grid=grid(4, 16), stream=stream0) del primals_4 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_6, buf4, 4, 16, grid=grid(4, 16), stream=stream0) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf8, 4, 16, grid=grid(4, 16), stream=stream0) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [o], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf12, primals_10, primals_1, 64, grid=grid(64), stream=stream0) del primals_10 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically 
Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_5.run(buf12, buf13, primals_12, buf14, buf15, 64, grid=grid(64), stream=stream0) del buf13 del primals_12 return (buf14, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf15, primals_11, primals_9, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
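# Illustrative eager-mode sketch (assumption: stock PyTorch only; not part of
# the Inductor output above) of what triton_poi_fused_add_relu_threshold_backward_5
# computes: the residual h_2 = h_1 + relu(fc(h_1)), plus the (relu(...) <= 0)
# mask the compiled graph saves for threshold_backward.
import torch
import torch.nn as nn

fc = nn.Linear(4, 4)
h1 = torch.randn(4, 4, 4)
act = torch.relu(fc(h1))   # nn.Linear broadcasts over the leading dims
h2 = h1 + act              # corresponds to buf14
mask = act <= 0            # corresponds to buf15 (bool tensor for backward)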
import math import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'n_heads': 4}]
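A minimal eager usage sketch for the reference module above (added for illustration, not part of the original file; CPU is enough since no Triton kernel is involved). It drives MAB with the tensors from get_inputs and a hypothetical all-ones presence mask:

import torch

queries, keys = get_inputs()
mab = MAB(d=4, n_heads=4)
out = mab(queries, keys)                   # shape [4, 4, 4]
presence = torch.ones(keys.shape[:2])      # hypothetical mask: every key present
out_masked = mab(queries, keys, presence)  # same shape; attention is masked
print(out.shape, out_masked.shape)

With an all-ones mask the (1 - presence) * 1e+32 penalty vanishes, so out and out_masked coincide; zeroing entries of presence suppresses the corresponding keys.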
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](buf0, primals_4, buf3, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_4 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(4, 16)](buf1, primals_6, buf4, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = 
empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(4, 16)](buf2, primals_8, buf8, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_add_4[grid(64)](buf12, primals_10, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_5[grid(64)](buf12, buf13, primals_12, buf14, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del primals_12 return buf14, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), buf15, primals_11, primals_9, reinterpret_tensor(buf8, (16, 1, 4 ), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. 
Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MABNew(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, input_0, input_1): primals_3 = self.mqkv.q_projector.weight primals_4 = self.mqkv.q_projector.bias primals_5 = self.mqkv.k_projector.weight primals_6 = self.mqkv.k_projector.bias primals_7 = self.mqkv.v_projector.weight primals_8 = self.mqkv.v_projector.bias primals_9 = self.mqkv.o_projector.weight primals_10 = self.mqkv.o_projector.bias primals_11 = self.fc.weight primals_12 = self.fc.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
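A hedged equivalence check for the compiled module above (an illustrative sketch, not part of the source; it assumes a CUDA device, since call() pins every buffer to cuda:0, and assumes the eager MAB from the reference listing is in scope alongside MABNew). The two classes share the same module tree, so a state_dict copy lines the weights up:

import torch

queries = torch.rand(4, 4, 4, device='cuda')
keys = torch.rand(4, 4, 4, device='cuda')
eager = MAB(d=4, n_heads=4).cuda()
compiled = MABNew(d=4, n_heads=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter names
with torch.no_grad():
    ref = eager(queries, keys)
    out = compiled(queries, keys)
print(torch.allclose(ref, out, atol=1e-5))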
karayanni/torch-scae
MAB
false
10,431
[ "Apache-2.0" ]
0
e044662d8942d8d1923d13d071f375144cf4a1e8
https://github.com/karayanni/torch-scae/tree/e044662d8942d8d1923d13d071f375144cf4a1e8
DiscriminatorHingeLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5m/c5mz5opmf2emkyscphowbwckdgbzlmt5fg36tj7scmuhse5cwbyg.py # Topologically Sorted Source Nodes: [zeros_like, sub, minimum, mean, real_loss, zeros_like_1, sub_1, minimum_1, mean_1, fake_loss, add], Original ATen: [aten.zeros_like, aten.sub, aten.minimum, aten.mean, aten.neg, aten.rsub, aten.add] # Source node to ATen node mapping: # add => add # fake_loss => neg_1 # mean => mean # mean_1 => mean_1 # minimum => minimum # minimum_1 => minimum_1 # real_loss => neg # sub => sub # sub_1 => sub_1 # zeros_like => full_default # zeros_like_1 => full_default_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %sub), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%minimum,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (-1, %arg1_1), kwargs = {}) # %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %sub_1), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%minimum_1,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %neg_1), kwargs = {}) triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0 = async_compile.triton('triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp8 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp9 = -1.0 tmp10 = tmp9 - tmp8 tmp11 = triton_helpers.minimum(tmp3, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp7 / tmp15 tmp17 = -tmp16 tmp18 = tmp14 / tmp15 tmp19 = -tmp18 tmp20 = tmp17 + tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [zeros_like, sub, minimum, mean, real_loss, zeros_like_1, sub_1, minimum_1, mean_1, fake_loss, add], Original ATen: [aten.zeros_like, aten.sub, aten.minimum, aten.mean, aten.neg, aten.rsub, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DiscriminatorHingeLoss(nn.Module): def __init__(self, reduction='mean'): super(DiscriminatorHingeLoss, self).__init__() if reduction not in ['mean', 'sum']: raise ValueError( 'Valid values for the reduction param are `mean`, `sum`') self.reduction = reduction def forward(self, fake_out, real_out): real_loss = -torch.minimum(torch.zeros_like(real_out), real_out - 1 ).mean() fake_loss = -torch.minimum(torch.zeros_like(fake_out), -1 - fake_out ).mean() return real_loss + fake_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
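A worked example added for clarity (not in the original file; plain eager PyTorch, CPU is fine). It relies on the identities -min(0, x - 1) = relu(1 - x) and -min(0, -1 - x) = relu(1 + x), so the loss above equals E[relu(1 - real_out)] + E[relu(1 + fake_out)]:

import torch
import torch.nn.functional as F

real_out = torch.tensor([1.5, 0.2])   # hypothetical discriminator scores
fake_out = torch.tensor([-2.0, 0.5])
loss = DiscriminatorHingeLoss()(fake_out, real_out)
ref = F.relu(1 - real_out).mean() + F.relu(1 + fake_out).mean()
print(loss.item(), ref.item())  # both print 1.15 (= 0.4 + 0.75)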
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp8 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp9 = -1.0 tmp10 = tmp9 - tmp8 tmp11 = triton_helpers.minimum(tmp3, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp7 / tmp15 tmp17 = -tmp16 tmp18 = tmp14 / tmp15 tmp19 = -tmp18 tmp20 = tmp17 + tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_minimum_neg_rsub_sub_zeros_like_0[grid(1)]( buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class DiscriminatorHingeLossNew(nn.Module): def __init__(self, reduction='mean'): super(DiscriminatorHingeLossNew, self).__init__() if reduction not in ['mean', 'sum']: raise ValueError( 'Valid values for the reduction param are `mean`, `sum`') self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
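A smoke test for the fused kernel above (an added sketch, not source; it requires CUDA because call() allocates on cuda:0, and assumes the eager DiscriminatorHingeLoss from the reference listing is in scope). The same random tensor is deliberately passed for both scores, so the comparison holds independently of how the trace ordered its two arguments:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = DiscriminatorHingeLoss()(x, x)
fused = DiscriminatorHingeLossNew()(x, x)
print(torch.allclose(eager, fused, atol=1e-6))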
kpandey008/SAGAN
DiscriminatorHingeLoss
false
10,432
[ "MIT" ]
0
8e673d2ccabeb0450faf30dcb347b9ff2d710ae2
https://github.com/kpandey008/SAGAN/tree/8e673d2ccabeb0450faf30dcb347b9ff2d710ae2
TransposeConv2dLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uo/cuoiyqgsyrfp53lkw4hij4ulyfkzax64rqr6gxumyfhn6ponmpoc.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_2 => convolution # x_3 => gt, mul_4, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_4), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x3), tmp7, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1.run(buf2, primals_3, buf3, 400, grid=grid(400), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
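A small sketch of the index arithmetic in triton_poi_fused__unsafe_index_0 above (added illustration, not source; plain PyTorch on CPU). The kernel implements nearest-neighbor upsampling by 2: output pixel (y, x) reads input pixel (int(y * 0.5), int(x * 0.5)), exactly the tmp4/tmp8 computation in the kernel body:

import torch
import torch.nn.functional as F

x = torch.arange(16.0).view(1, 1, 4, 4)
up = F.interpolate(x, scale_factor=2, mode='nearest')
y, c = 5, 3
assert up[0, 0, y, c] == x[0, 0, int(y * 0.5), int(c * 0.5)]  # up[5, 3] == x[2, 1]
print(up[0, 0])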
import torch import torch.nn as nn from torch.nn import functional as F from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='elu', norm= 'none', sn=False): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = 
nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class TransposeConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=False, scale_factor=2): super(TransposeConv2dLayer, self).__init__() self.scale_factor = scale_factor self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, x): x = F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') x = self.conv2d(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
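A minimal shape sketch for the reference layer above (added, not source; eager only, CPU is fine). Nearest upsampling doubles the 4x4 input to 8x8, and the unpadded kernel_size=4, stride=1 convolution then yields 5x5, matching the (4, 4, 5, 5) buffer asserted in call():

import torch

layer = TransposeConv2dLayer(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
print(layer(x).shape)  # torch.Size([4, 4, 5, 5])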
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1[grid(400) ](buf2, primals_3, buf3, 400, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_3 return buf2, primals_2, buf0, buf3 def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name 
= name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='elu', norm= 'none', sn=False): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class TransposeConv2dLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=False, scale_factor=2): super(TransposeConv2dLayerNew, self).__init__() self.scale_factor = scale_factor self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, input_0): primals_1 = self.conv2d.conv2d.weight primals_3 = self.conv2d.conv2d.bias primals_2 = input_0 output = 
call([primals_1, primals_2, primals_3]) return output[0]
kangzhiq/DeepFillv2_Pytorch
TransposeConv2dLayer
false
10,433
[ "MIT" ]
0
9c7ed61b25bb995713f89108b712490737abe1b1
https://github.com/kangzhiq/DeepFillv2_Pytorch/tree/9c7ed61b25bb995713f89108b712490737abe1b1
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/64/c6433gmrrnjxfqy257tszqraltgza5i6uxb7jrygvwczu6poqbwh.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 50 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sl/csldohruv4ml6lfysawazvg2w6uwqz6rik7zvmyl6nevkhfstrv4.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 39200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 9800) x4 = xindex % 9800 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (9824*x2)), tmp6, 
xmask) tl.store(out_ptr1 + (x4 + (9856*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (50, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (50, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 50, 5, 5), (1250, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 50, 28, 28), (39200, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 156800, grid=grid(156800), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 50, 14, 14), (9824, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 50, 14, 14), (9856, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 39200, grid=grid(39200), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], 
Original ATen: [] extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((50, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 50, 5, 5), (1250, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 50, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(50, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
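A quick shape walk-through makes the 16 * 5 * 5 flatten above less magic. This is an illustrative sketch, not part of the dataset entry; it re-derives fc1's 400 input features for the 4x3x32x32 input that get_inputs() produces:

import torch
import torch.nn as nn
import torch.nn.functional as F

# 32 -> conv 5x5 (no pad) -> 28 -> 2x2 max-pool -> 14
# 14 -> conv 5x5 (no pad) -> 10 -> 2x2 max-pool -> 5, hence 16 * 5 * 5 = 400.
x = torch.rand(4, 3, 32, 32)
x = F.max_pool2d(F.relu(nn.Conv2d(3, 50, 5)(x)), 2)   # (4, 50, 14, 14)
x = F.max_pool2d(F.relu(nn.Conv2d(50, 16, 5)(x)), 2)  # (4, 16, 5, 5)
assert x.flatten(1).shape == (4, 400)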
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 50 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 39200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 9800 x4 = xindex % 9800 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 9824 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 9856 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, 
tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (50, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 50, 5, 5), (1250, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 50, 28, 28), (39200, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(156800)](buf1, primals_2, 156800, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 50, 14, 14), (9824, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 50, 14, 14), (9856, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(39200)](buf1, buf2, buf3, 39200, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 50, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(50, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
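A minimal parity check for this entry, under stated assumptions: Net (the reference module above) and NetNew are both in scope, and a CUDA device is available. Copying the state dict makes the eager and compiled forward paths numerically comparable:

import torch

# Assumes Net and NetNew are importable and CUDA is present (both assumptions).
ref, fused = Net().cuda().eval(), NetNew().cuda().eval()
fused.load_state_dict(ref.state_dict())  # share weights so outputs should agree
x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), fused(x), rtol=1e-4, atol=1e-4)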
lykasbongbongbong/Pytorch
Net
false
10434
[ "MIT" ]
0
f01d89fb51ac939f5a110f5ab6190c11917e66fc
https://github.com/lykasbongbongbong/Pytorch/tree/f01d89fb51ac939f5a110f5ab6190c11917e66fc
Max_AvgPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/jg/cjgfftmpiknb3zm55q2motbdtnmpjvcqr5oxilzriywq5mqodg3b.py # Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d, aten.add] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # max_pool2d => _low_memory_max_pool2d_with_offsets # x => add # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [3, 3], [2, 2], [1, 1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %avg_pool2d), kwargs = {}) triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x4 = (xindex // 2) x3 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x4)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp12 = 2*x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x4)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x0) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x4)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2*x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x4)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + ((2*x0) + (8*x4)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x4)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + (2*x1) tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x4)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x4)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x4)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x4)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp53 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x4)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp54 = tmp53 + tmp52 tmp55 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x4)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tmp55 + tmp54 tmp57 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x4)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tmp57 + tmp56 tmp59 = tl.load(in_ptr0 + ((2*x0) + (8*x4)), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tmp59 + tmp58 tmp61 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x4)), tmp36 & xmask, eviction_policy='evict_last', other=0.0) tmp62 = tmp61 + tmp60 tmp63 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x4)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tmp63 + tmp62 tmp65 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x4)), tmp46 & xmask, 
eviction_policy='evict_last', other=0.0) tmp66 = tmp65 + tmp64 tmp67 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x4)), tmp49 & xmask, eviction_policy='evict_last', other=0.0) tmp68 = tmp67 + tmp66 tmp69 = 1 + ((-2)*x0) + ((-2)*x1) + (((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-2)*x0*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-2)*x1*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) + (4*x0*x1) + ((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5))) + ((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5))) tmp70 = tmp68 / tmp69 tmp71 = tmp51 + tmp70 tl.store(in_out_ptr0 + (x3), tmp71, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0.run(buf2, arg0_1, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from itertools import product as product class Max_AvgPool(nn.Module): def __init__(self, kernel_size=(3, 3), stride=2, padding=1, dim=128): super(Max_AvgPool, self).__init__() self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x): x = self.Maxpool(x) + self.Avgpool(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
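As a sanity check on the shapes (illustrative only, not from the dataset): with kernel 3, stride 2, padding 1, a 4x4 feature map pools down to 2x2, and the module is simply the elementwise sum of the two standard pooling ops:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
y = F.max_pool2d(x, 3, stride=2, padding=1) + F.avg_pool2d(x, 3, stride=2, padding=1)
assert y.shape == (4, 4, 2, 2)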
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x4 = xindex // 2 x3 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x4), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x4), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x4), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 * x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x4), tmp30 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp33 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp36 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + 2 * x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x4), tmp43 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp46 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp49 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x4), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp53 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x4), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp54 = tmp53 + tmp52 tmp55 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x4), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tmp55 + tmp54 tmp57 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x4), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tmp57 + tmp56 tmp59 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tmp59 + tmp58 tmp61 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp36 & xmask, 
eviction_policy='evict_last', other=0.0) tmp62 = tmp61 + tmp60 tmp63 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x4), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tmp63 + tmp62 tmp65 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp66 = tmp65 + tmp64 tmp67 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp49 & xmask, eviction_policy='evict_last', other=0.0) tmp68 = tmp67 + tmp66 tmp69 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5) ) tmp70 = tmp68 / tmp69 tmp71 = tmp51 + tmp70 tl.store(in_out_ptr0 + x3, tmp71, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0[grid(64)]( buf2, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf2, class Max_AvgPoolNew(nn.Module): def __init__(self, kernel_size=(3, 3), stride=2, padding=1, dim=128): super(Max_AvgPoolNew, self).__init__() self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
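An illustrative usage check, assuming both classes are in scope and CUDA is available; the fused kernel folds the max-pool, the avg-pool, and the add into a single launch, so it should match the eager sum:

import torch

# Assumes Max_AvgPool and Max_AvgPoolNew are importable and CUDA is present.
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(Max_AvgPoolNew()(x), Max_AvgPool()(x))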
kooBH/EXTD_Pytorch
Max_AvgPool
false
10435
[ "MIT" ]
0
e93b196c87054684cc6c757e1dfd26f8b7dc57cf
https://github.com/kooBH/EXTD_Pytorch/tree/e93b196c87054684cc6c757e1dfd26f8b7dc57cf
my_AvgPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/vb/cvbbi4nnli3horxmlcv2azovajihodz7seyhvtrxgtxbdi7voix6.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # input_2 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%permute, [4, 4], [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), 
xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp25 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp27 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp29 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules.module import Module class my_AvgPool2d(Module): def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True): super(my_AvgPool2d, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad def forward(self, input): input = input.transpose(3, 1) input = F.avg_pool2d(input, self.kernel_size, self.stride, self. padding, self.ceil_mode, self.count_include_pad) input = input.transpose(3, 1).contiguous() return input def __repr__(self): return self.__class__.__name__ + '(' + 'kernel_size=' + str(self. kernel_size) + ', stride=' + str(self.stride) + ', padding=' + str( self.padding) + ', ceil_mode=' + str(self.ceil_mode ) + ', count_include_pad=' + str(self.count_include_pad) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
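The transpose trick deserves a note: transpose(3, 1) turns an (N, C, H, W) tensor into (N, W, H, C), so avg_pool2d ends up averaging over height and channels rather than height and width. An illustrative sketch for the kernel_size=4 case used here:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# avg_pool2d pools the last two dims, which are (H, C) after the transpose.
y = F.avg_pool2d(x.transpose(3, 1), 4).transpose(3, 1).contiguous()
assert y.shape == (4, 1, 1, 4)  # matches the reinterpret_tensor shape above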
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp25 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp29 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x2, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 1, 1, 1), 0), class my_AvgPool2dNew(Module): def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True): super(my_AvgPool2dNew, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad def __repr__(self): return self.__class__.__name__ + '(' + 'kernel_size=' + str(self. kernel_size) + ', stride=' + str(self.stride) + ', padding=' + str( self.padding) + ', ceil_mode=' + str(self.ceil_mode ) + ', count_include_pad=' + str(self.count_include_pad) + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
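An illustrative parity check, assuming both classes are in scope and CUDA is available; the compiled path covers exactly this kernel_size=4 configuration on a 4x4x4x4 input:

import torch

# Assumes my_AvgPool2d and my_AvgPool2dNew are importable and CUDA is present.
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(my_AvgPool2dNew(4)(x), my_AvgPool2d(4)(x))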
likun97/Low_quality_classification_with_mobilenetv3
my_AvgPool2d
false
10436
[ "Apache-2.0" ]
0
a9e6f66caad937fc7c8e101cddb76f116219b255
https://github.com/likun97/Low_quality_classification_with_mobilenetv3/tree/a9e6f66caad937fc7c8e101cddb76f116219b255
Conv2dLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ue/cuecegnhgafe2dsjwb2idu7ooicbmsi2pwlqk5kxrayxsv6nzpux.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # x_1 => convolution # x_2 => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='elu', norm= 'none', sn=False): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': 
self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
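Note the design choice above: padding is always applied by a separate pad module and the conv itself uses padding=0, which is what lets 'reflect', 'replicate', and 'zero' share one conv code path. An illustrative construction (identifiers as defined above):

import torch

layer = Conv2dLayer(4, 4, 3, padding=1, pad_type='reflect',
                    activation='lrelu', norm='in')
y = layer(torch.rand(2, 4, 8, 8))
assert y.shape == (2, 4, 8, 8)  # pad(1) + 3x3 conv preserves spatial size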
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf1, primals_1, primals_2, buf1 def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), 
requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='elu', norm= 'none', sn=False): super(Conv2dLayerNew, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, input_0): primals_1 = self.conv2d.weight primals_3 = self.conv2d.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
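The activation in the fused kernel above is ELU with alpha=1: the kernel keeps positive pre-activations and applies expm1 to the rest (tmp4 selects the branch, tmp9 is the result). A minimal eager sketch of that decomposition, with illustrative names:

import torch
import torch.nn.functional as F

x = torch.randn(16)
# Same split the kernel performs: where(x > 0, x, expm1(x)) for alpha = 1.0.
manual_elu = torch.where(x > 0, x, torch.expm1(x))
print(torch.allclose(manual_elu, F.elu(x)))  # True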
kangzhiq/DeepFillv2_Pytorch
Conv2dLayer
false
10437
[ "MIT" ]
0
9c7ed61b25bb995713f89108b712490737abe1b1
https://github.com/kangzhiq/DeepFillv2_Pytorch/tree/9c7ed61b25bb995713f89108b712490737abe1b1
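SpectralNorm above normalizes the wrapped conv's weight by an estimate of its largest singular value, obtained by power iteration on the flattened weight. A standalone sketch of the same estimate (variable names here are illustrative, not from the repo):

import torch

def spectral_sigma(w: torch.Tensor, iters: int = 1, eps: float = 1e-12) -> torch.Tensor:
    # Flatten to 2-D (out_channels, rest), as _update_u_v does with w.view(height, -1).
    w2d = w.reshape(w.shape[0], -1)
    u = torch.randn(w2d.shape[0])
    v = torch.randn(w2d.shape[1])
    for _ in range(iters):
        v = w2d.t() @ u
        v = v / (v.norm() + eps)   # l2normalize(W^T u)
        u = w2d @ v
        u = u / (u.norm() + eps)   # l2normalize(W v)
    # sigma = u^T W v approximates the top singular value of w2d.
    return u @ (w2d @ v)

w = torch.randn(4, 4, 4, 4)
print(spectral_sigma(w, iters=5))  # approaches torch.linalg.svdvals(w.reshape(4, -1))[0]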
GatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/r3/cr3hlg2dj2d3nmsli5wlcbgrfym3b6ux3uuxd7pl3rggj6domt5d.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # x => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/e2/ce2cdkzdgfs7x35ywqy3cnp5gbzraaqnnk3zmuic27ohalb55dzx.py # Topologically Sorted Source Nodes: [conv, mask, gated_mask, conv_1, x_1], Original ATen: [aten.convolution, aten.sigmoid, aten.elu, aten.mul] # Source node to ATen node mapping: # conv => convolution # conv_1 => expm1, gt, mul, mul_2, where # gated_mask => sigmoid # mask => convolution_1 # x_1 => mul_3 # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %sigmoid), kwargs = {}) triton_poi_fused_convolution_elu_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_elu_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_elu_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0 tmp9 = tmp2 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.sigmoid(tmp5) tmp14 = tmp12 * tmp13 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(in_out_ptr1 + (x2), tmp5, xmask) tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1; del buf1 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [conv, mask, gated_mask, conv_1, x_1], Original ATen: [aten.convolution, aten.sigmoid, aten.elu, aten.mul] triton_poi_fused_convolution_elu_mul_sigmoid_1.run(buf2, buf4, primals_3, primals_5, buf5, 16, grid=grid(16), stream=stream0) del primals_3 del primals_5 return (buf5, primals_2, primals_4, buf0, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import Parameter


def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-08, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = Parameter(torch.Tensor(num_features).uniform_())
            self.beta = Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            mean = x.view(-1).mean().view(*shape)
            std = x.view(-1).std().view(*shape)
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class SpectralNorm(nn.Module):

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)


class GatedConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, pad_type='reflect', activation='elu',
                 norm='none', sn=False):
        super(GatedConv2d, self).__init__()
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, 'Unsupported padding type: {}'.format(pad_type)
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_channels)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_channels)
        elif norm == 'ln':
            self.norm = LayerNorm(out_channels)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if sn:
            self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
                kernel_size, stride, padding=0, dilation=dilation))
            self.mask_conv2d = SpectralNorm(nn.Conv2d(in_channels,
                out_channels, kernel_size, stride, padding=0,
                dilation=dilation))
        else:
            self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
                stride, padding=0, dilation=dilation)
            self.mask_conv2d = nn.Conv2d(in_channels, out_channels,
                kernel_size, stride, padding=0, dilation=dilation)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.pad(x)
        conv = self.conv2d(x)
        mask = self.mask_conv2d(x)
        gated_mask = self.sigmoid(mask)
        if self.activation:
            conv = self.activation(conv)
        x = conv * gated_mask
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math .abs(-3 + x1) + 16 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_elu_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0 tmp9 = tmp2 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.sigmoid(tmp5) tmp14 = tmp12 * tmp13 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(in_out_ptr1 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_elu_mul_sigmoid_1[grid(16)](buf2, buf4, primals_3, primals_5, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 del primals_5 return buf5, primals_2, primals_4, buf0, buf2, buf4 def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, 
x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class GatedConv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='reflect', activation='elu', norm= 'none', sn=False): super(GatedConv2dNew, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) self.mask_conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation= dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.mask_conv2d = 
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.sigmoid = torch.nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv2d.weight primals_3 = self.conv2d.bias primals_2 = self.mask_conv2d.weight primals_5 = self.mask_conv2d.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
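The load index in triton_poi_fused_reflection_pad2d_0 above, 15 - |x0 - 3| - 4*|x1 - 3| + 16*x2, is the reflection-padding formula specialized to this trace; since get_init_inputs leaves padding at 0, ReflectionPad2d(0) is a no-op and the expression collapses to a plain identity copy. A quick check of that claim (hypothetical helper, not part of the dump):

# Re-derive the kernel's source index for output coordinates (x2, x1, x0),
# where x0 indexes W, x1 indexes H, and x2 the flattened (N, C) planes.
def src_index(x2, x1, x0):
    return 15 - abs(x0 - 3) - 4 * abs(x1 - 3) + 16 * x2

# With padding=0 the reflection reduces to the contiguous layout x0 + 4*x1 + 16*x2.
assert all(
    src_index(x2, x1, x0) == x0 + 4 * x1 + 16 * x2
    for x2 in range(16) for x1 in range(4) for x0 in range(4)
)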
kangzhiq/DeepFillv2_Pytorch
GatedConv2d
false
10438
[ "MIT" ]
0
9c7ed61b25bb995713f89108b712490737abe1b1
https://github.com/kangzhiq/DeepFillv2_Pytorch/tree/9c7ed61b25bb995713f89108b712490737abe1b1
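The fused kernel triton_poi_fused_convolution_elu_mul_sigmoid_1 in this entry folds both bias adds, the ELU on the feature branch, the sigmoid on the mask branch, and their product into one pass. In eager PyTorch the same forward is short; a sketch with illustrative names (the pad is skipped because padding=0 makes ReflectionPad2d a no-op here):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
conv = torch.nn.Conv2d(4, 4, 4)       # feature branch
mask_conv = torch.nn.Conv2d(4, 4, 4)  # gating branch

feat = F.elu(conv(x))                 # tmp12 in the kernel
gate = torch.sigmoid(mask_conv(x))    # tmp13
out = feat * gate                     # tmp14, written to buf5
print(out.shape)  # torch.Size([4, 4, 1, 1]), matching buf5 in call()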
my_MaxPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/25/c252mdjutv7khw5cfthjqjyr4ilal3cc73gpc5h3h2hs2czd7hwz.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_2 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%permute, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex 
// 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp25 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp27 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp29 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair


class my_MaxPool2d(Module):

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False):
        super(my_MaxPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation
        self.return_indices = return_indices
        self.ceil_mode = ceil_mode

    def forward(self, input):
        input = input.transpose(3, 1)
        input = F.max_pool2d(input, self.kernel_size, self.stride,
            self.padding, self.dilation, self.ceil_mode, self.return_indices)
        input = input.transpose(3, 1).contiguous()
        return input

    def __repr__(self):
        kh, kw = _pair(self.kernel_size)
        dh, dw = _pair(self.stride)
        padh, padw = _pair(self.padding)
        dilh, dilw = _pair(self.dilation)
        padding_str = ', padding=(' + str(padh) + ', ' + str(padw
            ) + ')' if padh != 0 or padw != 0 else ''
        dilation_str = ', dilation=(' + str(dilh) + ', ' + str(dilw
            ) + ')' if dilh != 0 and dilw != 0 else ''
        ceil_str = ', ceil_mode=' + str(self.ceil_mode)
        return self.__class__.__name__ + '(' + 'kernel_size=(' + str(kh
            ) + ', ' + str(kw) + ')' + ', stride=(' + str(dh) + ', ' + str(dw
            ) + ')' + padding_str + dilation_str + ceil_str + ')'


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn.modules.module import Module from torch.nn.modules.utils import _pair assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp25 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp29 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 1, 1, 1), 0), class my_MaxPool2dNew(Module): def __init__(self, kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False): super(my_MaxPool2dNew, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.dilation = dilation self.return_indices = return_indices self.ceil_mode = ceil_mode def __repr__(self): kh, kw = _pair(self.kernel_size) dh, dw = _pair(self.stride) padh, padw = _pair(self.padding) dilh, dilw = _pair(self.dilation) padding_str = ', padding=(' + str(padh) + ', ' + str(padw ) + ')' if padh != 0 or padw != 0 else '' dilation_str = ', dilation=(' + str(dilh) + ', ' + str(dilw ) + ')' if dilh != 0 and dilw != 0 else '' ceil_str = ', ceil_mode=' + str(self.ceil_mode) return 
self.__class__.__name__ + '(' + 'kernel_size=(' + str(kh ) + ', ' + str(kw) + ')' + ', stride=(' + str(dh) + ', ' + str(dw ) + ')' + padding_str + dilation_str + ceil_str + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
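Note that the wrapper above never materializes the final transpose: reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 1, 1, 1), 0) returns the pooled (N, C, 1, 1) buffer reshaped to (N, 1, 1, C) purely by reassigning strides. A sketch of the same zero-copy move using the public as_strided API (an analogue, not the internal helper):

import torch

buf0 = torch.empty_strided((4, 4, 1, 1), (4, 1, 16, 16))  # pooled result layout
out = buf0.as_strided((4, 1, 1, 4), (4, 1, 1, 1))         # transpose as a view
print(out.shape, out.stride())  # torch.Size([4, 1, 1, 4]) (4, 1, 1, 1)
print(out.data_ptr() == buf0.data_ptr())  # True: no copy happened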
likun97/Low_quality_classification_with_mobilenetv3
my_MaxPool2d
false
10439
[ "Apache-2.0" ]
0
a9e6f66caad937fc7c8e101cddb76f116219b255
https://github.com/likun97/Low_quality_classification_with_mobilenetv3/tree/a9e6f66caad937fc7c8e101cddb76f116219b255
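my_MaxPool2d's transpose trick makes F.max_pool2d reduce over the channel dimension: after transpose(3, 1) the pooling window slides over (H, C) instead of (H, W). A sketch of the equivalence, assuming the 4x4x4x4 input from get_inputs:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)  # (N, C, H, W)

y = F.max_pool2d(x.transpose(3, 1), kernel_size=4).transpose(3, 1).contiguous()
print(y.shape)  # torch.Size([4, 1, 1, 4]): C and H reduced, W preserved

# The same reduction written directly as a max over dims 1 (C) and 2 (H):
y_ref = x.amax(dim=(1, 2), keepdim=True)
print(torch.allclose(y, y_ref))  # True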
BaselineActor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = 
xindex % 400 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py # Topologically Sorted Source Nodes: [act], Original ATen: [aten.tanh] # Source node to ATen node mapping: # act => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (400, 400), (400, 1)) assert_size_stride(primals_5, (400, ), (1, )) assert_size_stride(primals_6, (4, 400), (400, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 
4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 25600, grid=grid(25600), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 400), (1, 400), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 25600, grid=grid(25600), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 4), (1, 400), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [act], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(buf3, (64, 400), (400, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((400, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.functional import F
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.distributions


class BaselineActor(nn.Module):

    def __init__(self, state_size, action_size, hidden_size=400):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        act = torch.tanh(self.out(x))
        return act


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (400, 400), (400, 1)) assert_size_stride(primals_5, (400,), (1,)) assert_size_stride(primals_6, (4, 400), (400, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf7, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 400), (1, 400), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf3, primals_5, buf6, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 4), (1, 400), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 
triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), reinterpret_tensor(buf3, (64, 400), (400, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 class BaselineActorNew(nn.Module): def __init__(self, state_size, action_size, hidden_size=400): super().__init__() self.fc1 = nn.Linear(state_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
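The boolean buffers buf6 and buf7 produced by triton_poi_fused_relu_threshold_backward_0 are the saved masks for ReLU's backward: the kernel writes relu(x + bias) in place and stores relu(x + bias) <= 0 alongside it. A sketch of what autograd does with such a mask (illustrative names):

import torch

pre_act = torch.randn(64, 400)      # linear output, pre-activation
out = torch.relu(pre_act)           # tmp4 in the kernel
mask = out <= 0                     # tmp6, the saved threshold_backward mask

grad_out = torch.ones_like(out)
grad_in = grad_out.masked_fill(mask, 0.0)  # gradients zeroed where ReLU clipped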
greenstar1151/pytorch-benchmark
BaselineActor
false
10440
[ "BSD-3-Clause" ]
0
8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
https://github.com/greenstar1151/pytorch-benchmark/tree/8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
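Throughout the BaselineActor wrapper, the 4-D (4, 4, 4, 4) state is fed to the linear layers as a (64, 4) matrix via reinterpret_tensor, which is exactly the flattening nn.Linear performs on leading dimensions. A sketch of that equivalence (illustrative names; a tolerance is used because mm and addmm may round differently):

import torch
import torch.nn as nn

state = torch.rand(4, 4, 4, 4)
fc1 = nn.Linear(4, 400)

y = fc1(state)  # (4, 4, 4, 400)
y_mm = state.reshape(64, 4) @ fc1.weight.t() + fc1.bias
print(torch.allclose(y, y_mm.reshape(4, 4, 4, 400), atol=1e-6))  # True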
PMA
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cm/ccmcgo4hhocf76otuns232vkfdobmiyhbrbzce7zxp7kc5eree6u.py # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_2, [4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/2i/c2ifovqaljskkztxrqpysdk52llsxvhxf2zatdd7lqt2wm3hqaan.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + (16*y0)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sl/csl42w366z4dq6axbiuto4v6ryii3tmyrcbas65opz5pllxuww4m.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/56/c566ex7xddxc2fqpwqlmymdyd23nesbsyghxftm7cy73ebnuo3ke.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] # Source node to ATen node mapping: # h_1 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %repeat), kwargs = {}) triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ae/caemgqme4td5miovnvxrouyha6d75h7jrf5txbdjnejoxy7cc7ym.py # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # h_2 => add_1 # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_23,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] stream0 = get_raw_stream(0) triton_poi_fused_repeat_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), 
torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, primals_4, buf4, 4, 16, grid=grid(4, 16), stream=stream0) del primals_4 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, primals_6, buf5, 4, 16, grid=grid(4, 16), stream=stream0) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf3, primals_8, buf9, 4, 16, grid=grid(4, 16), stream=stream0) del primals_8 buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [o], Original ATen: [aten.bmm] extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf13, primals_10, buf0, 64, grid=grid(64), stream=stream0) del primals_10 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_6.run(buf13, buf14, primals_12, buf15, buf16, 64, grid=grid(64), stream=stream0) del buf14 del primals_12 return (buf15, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (16, 4), (4, 1), 0), buf16, primals_11, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import 
rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
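The harness above drives `call` through `print_performance`; for a one-off smoke test it can also be invoked directly. The sketch below is illustrative rather than part of the generated module: it builds the twelve `primals_*` tensors in the order and shapes that `call` asserts (input, seed tensor, then five weight/bias pairs), assuming a CUDA device is available, and checks the pooled output shape.

import torch

def smoke_test_call():
    # Order matches the assert_size_stride checks in call():
    # primals_1 input, primals_2 seeds, then the q/k/v/o/fc weight-bias pairs.
    args = [torch.rand(4, 4, 4, device='cuda'),
            torch.rand(1, 4, 4, device='cuda')]
    for _ in range(5):
        args.append(torch.rand(4, 4, device='cuda'))  # projection weight
        args.append(torch.rand(4, device='cuda'))     # projection bias
    out = call(args)  # call() empties the list it is given
    assert out[0].shape == (4, 4, 4)  # [B, n_seeds, d]
    return out[0]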
import math
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn


def qkv_attention(queries, keys, values, presence=None):
    """
    Transformer-like self-attention.

    Args:
      queries: Tensor of shape [B, N, d_k].
      keys: Tensor of shape [B, M, d_k].
      values: Tensor of shape [B, M, d_v].
      presence: None or tensor of shape [B, M].

    Returns:
      Tensor of shape [B, N, d_v]
    """
    d_k = queries.shape[-1]
    routing = torch.matmul(queries, keys.transpose(1, 2))
    if presence is not None:
        routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32
    routing = F.softmax(routing / np.sqrt(d_k), -1)
    return torch.matmul(routing, values)


class MultiHeadQKVAttention(nn.Module):
    """Multi-head version of Transformer-like attention."""

    def __init__(self, d_k, d_v, n_heads):
        super().__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.n_heads = n_heads
        d_k_p = int(math.ceil(d_k / n_heads)) * n_heads
        d_v_p = int(math.ceil(d_v / n_heads)) * n_heads
        self.q_projector = nn.Linear(d_k, d_k_p)
        self.k_projector = nn.Linear(d_k, d_k_p)
        self.v_projector = nn.Linear(d_v, d_v_p)
        self.o_projector = nn.Linear(d_v_p, d_v)

    def forward(self, queries, keys, values, presence=None):
        """
        Multi-head transformer-like self-attention.

        Args:
          queries: Tensor of shape [B, N, d_k].
          keys: Tensor of shape [B, M, d_k].
          values: Tensor of shape [B, M, d_v].
          presence: None or tensor of shape [B, M].

        Returns:
          Tensor of shape [B, N, d_v]
        """
        assert queries.shape[2] == keys.shape[2]
        assert keys.shape[1] == values.shape[1]
        if presence is not None:
            assert values.shape[:2] == presence.shape
        B, N, _d_k = queries.shape
        M, _d_v = values.shape[1:]
        H = self.n_heads
        q_p = self.q_projector(queries)
        k_p = self.k_projector(keys)
        v_p = self.v_projector(values)
        del queries, keys, values
        q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1)
        k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1)
        v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1)
        if presence is not None:
            presence = presence.repeat(self.n_heads, 1)
        o = qkv_attention(q, k, v, presence)
        o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1)
        return self.o_projector(o)


class MAB(nn.Module):

    def __init__(self, d, n_heads, layer_norm=False):
        super().__init__()
        self.layer_norm = layer_norm
        self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads)
        if layer_norm:
            self.ln0 = nn.LayerNorm(d)
            self.ln1 = nn.LayerNorm(d)
        self.fc = nn.Linear(d, d)

    def forward(self, queries, keys, presence=None):
        h = self.mqkv(queries, keys, keys, presence)
        h = h + queries
        if presence is not None:
            assert presence.shape[1] == queries.shape[1] == keys.shape[1]
            h = h * presence.unsqueeze(-1)
        if self.layer_norm:
            h = self.ln0(h)
        h = h + F.relu(self.fc(h))
        if self.layer_norm:
            h = self.ln1(h)
        return h


class PMA(nn.Module):

    def __init__(self, d, n_heads, n_seeds, layer_norm=False):
        super().__init__()
        self.mab = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm)
        self.S = nn.Parameter(torch.zeros(1, n_seeds, d), requires_grad=True)
        with torch.no_grad():
            nn.init.xavier_uniform_(self.S)

    def forward(self, x, presence=None):
        batch_size = x.shape[0]
        return self.mab(self.S.repeat(batch_size, 1, 1), x, presence)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d': 4, 'n_heads': 4, 'n_seeds': 4}]
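Read top-down, `PMA` pools a set into `n_seeds` learned seed vectors through a single `MAB` block. A minimal eager-mode usage sketch, built from the `get_init_inputs()`/`get_inputs()` helpers above (CPU is fine here):

import torch

args, kwargs = get_init_inputs()   # [], {'d': 4, 'n_heads': 4, 'n_seeds': 4}
pma = PMA(*args, **kwargs)

x = get_inputs()[0]                # a batch of 4 sets, each 4 elements of dim 4
out = pma(x)                       # seeds attend over the set through MAB
print(out.shape)                   # torch.Size([4, 4, 4]) == [B, n_seeds, d]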
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(4, 16)](buf1, primals_4, buf4, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_4 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_1[grid(4, 16)](buf2, primals_6, buf5, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), 
torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused__softmax_3[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_clone_1[grid(4, 16)](buf3, primals_8, buf9, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) del buf3 extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0) del buf12 triton_poi_fused_add_5[grid(64)](buf13, primals_10, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_6[grid(64)](buf13, buf14, primals_12, buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del primals_12 return buf15, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf13, (16, 4), (4, 1), 0 ), buf16, primals_11, primals_9, reinterpret_tensor(buf9, (16, 1, 4 ), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), primals_3 def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. 
Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h class PMANew(nn.Module): def __init__(self, d, n_heads, n_seeds, layer_norm=False): super().__init__() self.mab = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.S = nn.Parameter(torch.zeros(1, n_seeds, d), requires_grad=True) with torch.no_grad(): nn.init.xavier_uniform_(self.S) def forward(self, input_0): primals_2 = self.S primals_3 = self.mab.mqkv.q_projector.weight primals_4 = self.mab.mqkv.q_projector.bias primals_5 = self.mab.mqkv.k_projector.weight primals_6 = self.mab.mqkv.k_projector.bias primals_7 = self.mab.mqkv.v_projector.weight primals_8 = self.mab.mqkv.v_projector.bias primals_9 = self.mab.mqkv.o_projector.weight primals_10 = self.mab.mqkv.o_projector.bias primals_11 = self.mab.fc.weight primals_12 = self.mab.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
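`PMANew` is the Inductor-compiled drop-in for `PMA`: it feeds the same parameters into `call` instead of running the eager graph. A plausible sanity check, sketched below using the eager `PMA` from the reference listing above (CUDA required, tolerance illustrative), is to share one set of weights between both modules and compare outputs:

import torch

torch.manual_seed(0)
ref = PMA(d=4, n_heads=4, n_seeds=4).cuda()
fused = PMANew(d=4, n_heads=4, n_seeds=4).cuda()
fused.load_state_dict(ref.state_dict())   # identical parameters

x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(ref(x), fused(x), atol=1e-5))  # expect True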
karayanni/torch-scae
PMA
false
10441
[ "Apache-2.0" ]
0
e044662d8942d8d1923d13d071f375144cf4a1e8
https://github.com/karayanni/torch-scae/tree/e044662d8942d8d1923d13d071f375144cf4a1e8
ISAB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cm/ccmcgo4hhocf76otuns232vkfdobmiyhbrbzce7zxp7kc5eree6u.py # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_2, [4, 1, 1]), kwargs = {}) triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/2i/c2ifovqaljskkztxrqpysdk52llsxvhxf2zatdd7lqt2wm3hqaan.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + (16*y0)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # routing_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sl/csl42w366z4dq6axbiuto4v6ryii3tmyrcbas65opz5pllxuww4m.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/56/c566ex7xddxc2fqpwqlmymdyd23nesbsyghxftm7cy73ebnuo3ke.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] # Source node to ATen node mapping: # h_1 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %repeat), kwargs = {}) triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ae/caemgqme4td5miovnvxrouyha6d75h7jrf5txbdjnejoxy7cc7ym.py # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # h_2 => add_1 # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_23,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4, ), (1, )) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4, ), (1, )) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat] stream0 = get_raw_stream(0) triton_poi_fused_repeat_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) 
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, primals_4, buf4, 4, 16, grid=grid(4, 16), stream=stream0) del primals_4 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, primals_6, buf5, 4, 16, grid=grid(4, 16), stream=stream0) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [routing_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf3, primals_8, buf9, 4, 16, grid=grid(4, 16), stream=stream0) del primals_8 buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [o], Original ATen: [aten.bmm] extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf13, primals_10, buf0, 64, grid=grid(64), stream=stream0) del primals_10 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf15) del primals_13 buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu, h_2], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_6.run(buf13, buf14, primals_12, buf16, buf32, 64, grid=grid(64), stream=stream0) del primals_12 buf17 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_4], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf15, primals_14, buf19, 4, 16, grid=grid(4, 16), stream=stream0) del primals_14 buf20 = reinterpret_tensor(buf15, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf17, primals_16, buf20, 4, 16, grid=grid(4, 16), stream=stream0) del primals_16 buf21 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [routing_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf20, (16, 1, 4), (4, 0, 1), 0), out=buf21) buf22 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [routing_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf21, buf22, 256, grid=grid(256), stream=stream0) buf23 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [routing_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf22, buf23, 256, grid=grid(256), stream=stream0) del buf22 buf24 = reinterpret_tensor(buf17, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf18, primals_18, buf24, 4, 16, grid=grid(4, 16), stream=stream0) del primals_18 buf25 = reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [o_2], Original ATen: [aten.bmm] extern_kernels.bmm(buf23, reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 0), 0), out=buf25) buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_7], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf25, buf26, 16, 4, grid=grid(16, 4), stream=stream0) buf27 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf27) buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0); del buf27 # reuse # Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf28, primals_20, primals_1, 64, grid=grid(64), stream=stream0) del primals_20 buf29 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf28, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf29) buf30 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_1, h_5], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_6.run(buf28, buf29, primals_22, buf30, buf31, 64, grid=grid(64), stream=stream0) del buf29 del primals_22 return (buf30, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf23, reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(buf28, (16, 4), (4, 1), 0), buf31, primals_21, primals_19, reinterpret_tensor(buf24, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf19, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0), primals_17, primals_15, buf32, primals_11, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
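The wrapper above only runs on a CUDA device, and its call() consumes inputs in the order pinned down by the assert_size_stride guards (primals_1, primals_2, then ten weight/bias pairs). A minimal sketch of driving it by hand, assuming it executes inside this module's namespace; the weights are random, so this only checks that the kernels launch:

import torch

x = torch.rand(4, 4, 4, device='cuda')            # primals_1: the input set
inducing = torch.rand(1, 4, 4, device='cuda')     # primals_2: the inducing points
args = [x, inducing]
for _ in range(10):                               # primals_3 .. primals_22
    args += [torch.rand(4, 4, device='cuda'), torch.rand(4, device='cuda')]
out = call(args)[0]   # call() clears args; first return is the forward output
print(out.shape)      # torch.Size([4, 4, 4])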
import math import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h class ISAB(nn.Module): def __init__(self, d, n_heads, n_inducing_points, layer_norm=False): super().__init__() self.mab0 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.mab1 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.I = nn.Parameter(torch.zeros(1, n_inducing_points, d), requires_grad=True) with torch.no_grad(): nn.init.xavier_uniform_(self.I) def forward(self, x, presence=None): batch_size = x.shape[0] h = self.mab0(self.I.repeat(batch_size, 1, 1), x, presence) return self.mab1(x, h) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'n_heads': 4, 'n_inducing_points': 4}]
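A minimal usage sketch of the eager ISAB defined above, with the shapes taken from get_inputs()/get_init_inputs() (CPU suffices here):

import torch

block = ISAB(d=4, n_heads=4, n_inducing_points=4)
x = torch.rand(4, 4, 4)        # [batch, n_points, d]
y = block(x)                   # points -> inducing points -> points
print(y.shape)                 # torch.Size([4, 4, 4])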
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4,), (1,)) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(4, 16)](buf1, primals_4, buf4, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_4 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_1[grid(4, 16)](buf2, primals_6, buf5, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused__softmax_3[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_clone_1[grid(4, 16)](buf3, primals_8, buf9, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) del buf3 extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0) del buf12 triton_poi_fused_add_5[grid(64)](buf13, primals_10, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf14) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf15) del primals_13 buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_6[grid(64)](buf13, buf14, primals_12, buf16, buf32, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = buf14 del buf14 extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(4, 16)](buf15, primals_14, buf19, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_14 buf20 = reinterpret_tensor(buf15, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf15 triton_poi_fused_clone_1[grid(4, 16)](buf17, primals_16, buf20, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_16 buf21 = buf7 del buf7 extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf20, (16, 1, 4), (4, 0, 1), 0), out=buf21) buf22 = empty_strided_cuda((16, 4, 4), 
(16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf21, buf22, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf23 = buf21 del buf21 triton_poi_fused__softmax_3[grid(256)](buf22, buf23, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf22 buf24 = reinterpret_tensor(buf17, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf17 triton_poi_fused_clone_1[grid(4, 16)](buf18, primals_18, buf24, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1) del primals_18 buf25 = reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 1), 0) del buf18 extern_kernels.bmm(buf23, reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 0), 0), out=buf25) buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf25, buf26, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf27 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0) del buf25 extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf27) buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0) del buf27 triton_poi_fused_add_5[grid(64)](buf28, primals_20, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_20 buf29 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf28, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf29) buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_6[grid(64)](buf28, buf29, primals_22, buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf29 del primals_22 return (buf30, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor( buf13, (16, 4), (4, 1), 0), reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf23, reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(buf28, (16, 4), (4, 1), 0), buf31, primals_21, primals_19, reinterpret_tensor(buf24, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf19, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0), primals_17, primals_15, buf32, primals_11, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0), primals_3) def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. 
Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h class ISABNew(nn.Module): def __init__(self, d, n_heads, n_inducing_points, layer_norm=False): super().__init__() self.mab0 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.mab1 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.I = nn.Parameter(torch.zeros(1, n_inducing_points, d), requires_grad=True) with torch.no_grad(): nn.init.xavier_uniform_(self.I) def forward(self, input_0): primals_2 = self.I primals_3 = self.mab0.mqkv.q_projector.weight primals_4 = self.mab0.mqkv.q_projector.bias primals_5 = self.mab0.mqkv.k_projector.weight primals_6 = self.mab0.mqkv.k_projector.bias primals_7 = self.mab0.mqkv.v_projector.weight primals_8 = self.mab0.mqkv.v_projector.bias primals_9 = self.mab0.mqkv.o_projector.weight primals_10 = self.mab0.mqkv.o_projector.bias primals_11 = self.mab0.fc.weight primals_12 = self.mab0.fc.bias primals_13 = self.mab1.mqkv.q_projector.weight primals_14 = self.mab1.mqkv.q_projector.bias primals_15 = self.mab1.mqkv.k_projector.weight primals_16 = self.mab1.mqkv.k_projector.bias primals_17 = self.mab1.mqkv.v_projector.weight primals_18 = self.mab1.mqkv.v_projector.bias primals_19 = self.mab1.mqkv.o_projector.weight primals_20 = self.mab1.mqkv.o_projector.bias primals_21 = self.mab1.fc.weight primals_22 = self.mab1.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22]) return output[0]
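ISABNew exposes the same parameter tree as the eager ISAB, so one plausible sanity check (CUDA required, since call() launches the Triton kernels, and assuming both class definitions are importable) is to share weights and compare outputs:

import torch

eager = ISAB(d=4, n_heads=4, n_inducing_points=4).cuda()
compiled = ISABNew(d=4, n_heads=4, n_inducing_points=4).cuda()
compiled.load_state_dict(eager.state_dict())   # identical parameter names

x = torch.rand(4, 4, 4, device='cuda')
# loose tolerances: the fused kernels reorder float32 reductions
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-5)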
karayanni/torch-scae
ISAB
false
10442
[ "Apache-2.0" ]
0
e044662d8942d8d1923d13d071f375144cf4a1e8
https://github.com/karayanni/torch-scae/tree/e044662d8942d8d1923d13d071f375144cf4a1e8
adder2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/zr/czriihulllg44xhaffhdwvpylq55lkftttdpdjcdqfcu3eujm5rf.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = 
xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/44/c44i7vnizkohfraswy2fybvcbu4j2tl54qnzscskv4lizxkoye7f.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.sub, aten.abs, aten.sum] # Source node to ATen node mapping: # out => abs_1, sub, sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_6, %unsqueeze_7), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [1]), kwargs = {}) triton_per_fused_abs_sub_sum_1 = async_compile.triton('triton_per_fused_abs_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = (xindex // 4) x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (4*r2)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/eh/ceh3drpczc5qvlpgea6jmkqhnvpfcv3u5sykd2ulnebudkpn4a6l.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # out_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: 
torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tmp1 = -tmp0 tl.store(out_ptr0 + (x1 + (4*y0)), tmp1, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.sub, aten.abs, aten.sum] triton_per_fused_abs_sub_sum_1.run(primals_1, buf0, buf1, 16, 64, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf1, buf2, 4, 4, grid=grid(4, 4), stream=stream0) del buf1 return (buf2, primals_1, reinterpret_tensor(buf0, (1, 64, 4), (256, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
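The heavy lifting above is the pair triton_per_fused_abs_sub_sum_1 (accumulate L1 distances over the 64-element patch dimension) and triton_poi_fused_clone_2 (negate and permute the result). An eager reference for what that pair computes together — my own sketch, not part of the generated file:

import torch

W_col = torch.rand(4, 64)      # four flattened filters
X_col = torch.rand(64, 4)      # one flattened patch per batch element
# fused kernels compute: out[f, n] = -sum_k |W_col[f, k] - X_col[k, n]|
ref = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1)
print(ref.shape)               # torch.Size([4, 4])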
from torch.autograd import Function import math import torch import torch.nn as nn def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class adder2d(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False): super(adder2d, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, x): output = adder2d_function(x, self.adder, self.stride, self.padding) if self.bias: output += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4, 'output_channel': 4, 'kernel_size': 4}]
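With the shapes from get_inputs()/get_init_inputs(), the output-size formula collapses: h_out = (4 - 4 + 2*0)/1 + 1 = 1, and likewise w_out, so each output element is a single negative L1 distance between one filter and the whole input image. A quick sketch that checks one entry by hand:

import torch

layer = adder2d(input_channel=4, output_channel=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y = layer(x)
print(y.shape)                             # torch.Size([4, 4, 1, 1])

# entry [n, f] is -sum(|filter_f - image_n|), up to float rounding
manual = -(layer.adder[0] - x[0]).abs().sum()
torch.testing.assert_close(y[0, 0, 0, 0], manual)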
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.autograd import Function import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_per_fused_abs_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = xindex // 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2), xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = -tmp0 tl.store(out_ptr0 + (x1 + 4 * y0), tmp1, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_abs_sub_sum_1[grid(16)](primals_1, buf0, buf1, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del buf1 return buf2, primals_1, reinterpret_tensor(buf0, (1, 64, 4), (256, 4, 1), 0 ) def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class adder2dNew(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False): super(adder2dNew, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, input_0): primals_1 = self.adder primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
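Because the traced shapes give h_out = w_out = 1, the generated call() skips unfold entirely and reduces straight over the flattened patch. A plausible parity check against the eager layer (CUDA required, both class definitions assumed in scope):

import torch

eager = adder2d(4, 4, 4).cuda()
compiled = adder2dNew(4, 4, 4).cuda()
with torch.no_grad():
    compiled.adder.copy_(eager.adder)      # share the filter bank

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-5)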
mark531593296/AdderNet
adder2d
false
10443
[ "BSD-3-Clause" ]
0
2936728f537c0cceb8a47727630e5723af86df61
https://github.com/mark531593296/AdderNet/tree/2936728f537c0cceb8a47727630e5723af86df61
BaselineDiscreteCritic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fk/cfkpmzrz5fsihotvtb2iptrxsxsj2pu6jx4m3j5xhm4ptz5cd42c.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex 
x0 = xindex % 300 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6c/c6cbnt5xpzncru5uqhj6zzm2rgjtlv6ylmvnad4rfkm4h2d5ni5s.py # Topologically Sorted Source Nodes: [x, linear_1], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # linear_1 => view_2 # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu, [64, 300]), kwargs = {}) triton_poi_fused_relu_view_1 = async_compile.triton('triton_poi_fused_relu_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = (xindex // 300) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (300, 4), (4, 1)) assert_size_stride(primals_2, (300, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 300), (300, 1)) assert_size_stride(primals_5, (300, ), (1, )) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_1, (4, 300), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf0, primals_2, buf1, buf8, 19200, grid=grid(19200), stream=stream0) del primals_2 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, linear_1], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_1.run(buf1, buf2, 19200, grid=grid(19200), stream=stream0) buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (300, 300), (1, 300), 0), out=buf3) buf4 = buf1; del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf4, buf7, 19200, grid=grid(19200), stream=stream0) del primals_5 buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_1, vals], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_1.run(buf4, buf5, 19200, grid=grid(19200), stream=stream0) del buf4 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [vals], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_7 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((300, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((300, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F from torch.functional import F from torch.nn import functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.distributions class BaselineDiscreteCritic(nn.Module): def __init__(self, obs_shape, action_shape, hidden_size=300): super().__init__() self.fc1 = nn.Linear(obs_shape, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, action_shape) def forward(self, state): x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) vals = self.out(x) return vals def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'obs_shape': 4, 'action_shape': 4}]
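The critic is a plain two-hidden-layer MLP mapping observations to one value per discrete action; a minimal sketch of greedy action selection on top of it (layer sizes from get_init_inputs(), the batch size assumed):

import torch

critic = BaselineDiscreteCritic(obs_shape=4, action_shape=4)
obs = torch.rand(8, 4)               # batch of flat observations
q_values = critic(obs)               # [8, 4]: one value per action
greedy = q_values.argmax(dim=-1)     # greedy discrete policy
print(greedy.shape)                  # torch.Size([8])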
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (300, 4), (4, 1)) assert_size_stride(primals_2, (300,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 300), (300, 1)) assert_size_stride(primals_5, (300,), (1,)) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 300), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(19200)](buf0, primals_2, buf1, buf8, 19200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_2 buf2 = buf0 del buf0 triton_poi_fused_relu_view_1[grid(19200)](buf1, buf2, 19200, XBLOCK =256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (300, 300), ( 1, 300), 0), out=buf3) buf4 = buf1 del buf1 buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(19200)](buf3, primals_5, buf4, buf7, 19200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_5 buf5 = buf3 del buf3 triton_poi_fused_relu_view_1[grid(19200)](buf4, buf5, 19200, XBLOCK =256, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_7 return 
reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, primals_6, buf7, primals_4, buf8 class BaselineDiscreteCriticNew(nn.Module): def __init__(self, obs_shape, action_shape, hidden_size=300): super().__init__() self.fc1 = nn.Linear(obs_shape, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.out = nn.Linear(hidden_size, action_shape) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
greenstar1151/pytorch-benchmark
BaselineDiscreteCritic
false
10444
[ "BSD-3-Clause" ]
0
8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
https://github.com/greenstar1151/pytorch-benchmark/tree/8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
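Note on the record above (editor's sketch, not part of the dataset): the kernel triton_poi_fused_relu_threshold_backward_0 fuses three things into one pass over the GEMM output: the bias add, the ReLU, and the <= 0 mask that the ReLU backward reuses (buf8/buf7). A minimal plain-PyTorch restatement of the same computation, with illustrative names:

import torch

def fused_relu_with_mask(mm_out: torch.Tensor, bias: torch.Tensor):
    # mm_out plays the role of buf0 (state @ W^T); bias is primals_2/primals_5
    pre = mm_out + bias          # bias broadcasts over the last (hidden) dim
    post = torch.relu(pre)       # value stored for the next layer (buf1/buf4)
    mask = post <= 0.0           # boolean mask kept for the backward (buf8/buf7)
    return post, mask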
TransposeGatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ya/cya2grnbhraytq2wzrkx5sd2ottwnbrnd5ohd2xstcxyryneuc25.py # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add_4 # mv => mul_4, sum_1 # norm => pow_1, pow_2, sum_2 # truediv => div # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add_4), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_1 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) 
tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qi/cqiozgecuvqtnurxrggbllqpuci3n65ycew5qi5gdqg44ypxzegy.py # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] # Source node to ATen node mapping: # mv_1 => mul_5, sum_3 # truediv => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [1]), kwargs = {}) triton_per_fused_div_mv_2 = async_compile.triton('triton_per_fused_div_mv_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mv_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, 
RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qa/cqaed4ios3xqwlv4d3cciikkdz7d73vhwkegurd5cxca3y7htmvg.py # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_5 # norm_1 => pow_3, pow_4, sum_4 # truediv_1 => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add_5), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_3 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + 
tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c2/cc2arficwjs4sforhl25gdfmb3uzfg7hkw46gq3mxgv57jy52z32.py # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] # Source node to ATen node mapping: # sigma => mul_7, sum_6 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_3), kwargs = {}) # %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_7,), kwargs = {}) triton_per_fused_dot_4 = async_compile.triton('triton_per_fused_dot_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dot_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kw/ckwzptlssdpmtxi6pt23ik63xcuqar2giaakuqtgizxlg5weagc7.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2w/c2wlnlirhh2nibaimsmrfiriqyr7m3r6ij6r2vrxypktuy5hni2x.py # Topologically Sorted Source Nodes: [conv, mask, gated_mask, conv_1, x_2], Original ATen: [aten.convolution, aten.sigmoid, aten.leaky_relu, aten.mul] # Source node to ATen node mapping: # conv => convolution # conv_1 => gt, mul_12, where # gated_mask => sigmoid # mask => convolution_1 # x_2 => mul_13 # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %div_2, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %div_5, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_12), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %sigmoid), kwargs = {}) triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 0.2 tmp9 = tmp2 * tmp8 tmp10 = tl.where(tmp7, tmp2, tmp9) tmp11 = tl.sigmoid(tmp5) tmp12 = tmp10 * tmp11 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, ), (1, ), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse buf27 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_1.run(buf3, primals_4, primals_2, buf1, buf27, 1, 64, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_2.run(primals_4, buf1, buf3, buf4, 4, 64, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source 
Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_3.run(buf4, buf6, 1, 4, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] triton_per_fused_dot_4.run(buf6, buf4, buf7, 1, 4, grid=grid(1), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_5.run(primals_4, buf7, buf8, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 5, 5), (100, 25, 5, 1)) buf11 = empty_strided_cuda((64, ), (1, ), torch.float32) buf12 = empty_strided_cuda((), (), torch.float32) buf13 = buf12; del buf12 # reuse buf36 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv_3, norm_2, add_2, truediv_3], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_1.run(buf13, primals_8, primals_6, buf11, buf36, 1, 64, grid=grid(1), stream=stream0) buf14 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [truediv_3, mv_4], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_2.run(primals_8, buf11, buf13, buf14, 4, 64, grid=grid(4), stream=stream0) buf16 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_3, add_3, truediv_4], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_3.run(buf14, buf16, 1, 4, grid=grid(1), stream=stream0) buf17 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma_1], Original ATen: [aten.dot] triton_per_fused_dot_4.run(buf16, buf14, buf17, 1, 4, grid=grid(1), stream=stream0) del buf14 buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_5], Original ATen: [aten.div] triton_poi_fused_div_5.run(primals_8, buf17, buf18, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf0, buf18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 5, 5), (100, 25, 5, 1)) buf10 = buf9; del buf9 # reuse buf20 = buf19; del buf19 # reuse buf21 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [conv, mask, gated_mask, conv_1, x_2], Original ATen: [aten.convolution, aten.sigmoid, aten.leaky_relu, aten.mul] triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6.run(buf10, buf20, primals_5, primals_9, buf21, 400, grid=grid(400), stream=stream0) del primals_5 del primals_9 # Topologically Sorted Source Nodes: [], Original ATen: [] buf22 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf22, (4, ), (1, )) del buf1 # Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div] buf28 = torch.ops.aten.set_.source_Tensor(primals_3, buf27) assert_size_stride(buf28, (64, ), (1, )) del primals_3 # Topologically Sorted Source Nodes: [], Original ATen: [] buf31 = 
torch.ops.aten.set_.source_Tensor(primals_6, buf16) assert_size_stride(buf31, (4, ), (1, )) del buf11 # Topologically Sorted Source Nodes: [truediv_3], Original ATen: [aten.div] buf37 = torch.ops.aten.set_.source_Tensor(primals_7, buf36) assert_size_stride(buf37, (64, ), (1, )) del primals_7 return (buf21, buf8, buf18, primals_2, primals_4, primals_6, primals_8, buf0, buf3, buf6, buf7, buf8, buf10, buf13, buf16, buf17, buf18, buf20, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn import functional as F from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class GatedConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='reflect', activation='elu', norm= 'none', sn=False): super(GatedConv2d, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif 
activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) self.mask_conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation= dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.mask_conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.sigmoid = torch.nn.Sigmoid() def forward(self, x): x = self.pad(x) conv = self.conv2d(x) mask = self.mask_conv2d(x) gated_mask = self.sigmoid(mask) if self.activation: conv = self.activation(conv) x = conv * gated_mask return x class TransposeGatedConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True, scale_factor=2): super(TransposeGatedConv2d, self).__init__() self.scale_factor = scale_factor self.gated_conv2d = GatedConv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, x): x = F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') x = self.gated_conv2d(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 0.2 tmp9 = tmp2 * tmp8 tmp10 = tl.where(tmp7, tmp2, tmp9) tmp11 = tl.sigmoid(tmp5) tmp12 = tmp10 * tmp11 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64,), (1,), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 buf27 = empty_strided_cuda((64,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf3, primals_4, primals_2, buf1, buf27, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_div_mv_2[grid(4)](primals_4, buf1, buf3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf4, buf6, 
1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_4[grid(1)](buf6, buf4, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_5[grid(256)](primals_4, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 5, 5), (100, 25, 5, 1)) buf11 = empty_strided_cuda((64,), (1,), torch.float32) buf12 = empty_strided_cuda((), (), torch.float32) buf13 = buf12 del buf12 buf36 = empty_strided_cuda((64,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf13, primals_8, primals_6, buf11, buf36, 1, 64, XBLOCK=1, num_warps= 2, num_stages=1) buf14 = buf4 del buf4 triton_per_fused_div_mv_2[grid(4)](primals_8, buf11, buf13, buf14, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf16 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf14, buf16, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf17 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_4[grid(1)](buf16, buf14, buf17, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf14 buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_5[grid(256)](primals_8, buf17, buf18, 256, XBLOCK=128, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf0, buf18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 5, 5), (100, 25, 5, 1)) buf10 = buf9 del buf9 buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32 ) triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6[grid(400)](buf10, buf20, primals_5, primals_9, buf21, 400, XBLOCK=256, num_warps= 4, num_stages=1) del primals_5 del primals_9 buf22 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf22, (4,), (1,)) del buf1 buf28 = torch.ops.aten.set_.source_Tensor(primals_3, buf27) assert_size_stride(buf28, (64,), (1,)) del primals_3 buf31 = torch.ops.aten.set_.source_Tensor(primals_6, buf16) assert_size_stride(buf31, (4,), (1,)) del buf11 buf37 = torch.ops.aten.set_.source_Tensor(primals_7, buf36) assert_size_stride(buf37, (64,), (1,)) del primals_7 return (buf21, buf8, buf18, primals_2, primals_4, primals_6, primals_8, buf0, buf3, buf6, buf7, buf8, buf10, buf13, buf16, buf17, buf18, buf20) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module 
= module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class GatedConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='reflect', activation='elu', norm= 'none', sn=False): super(GatedConv2d, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) self.mask_conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation= dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.mask_conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.sigmoid = torch.nn.Sigmoid() def forward(self, x): x = self.pad(x) conv = self.conv2d(x) mask = self.mask_conv2d(x) gated_mask = self.sigmoid(mask) if self.activation: conv = self.activation(conv) x = conv * gated_mask return x class TransposeGatedConv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True, scale_factor=2): 
super(TransposeGatedConv2dNew, self).__init__() self.scale_factor = scale_factor self.gated_conv2d = GatedConv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, input_0): primals_2 = self.gated_conv2d.conv2d.module.bias primals_5 = self.gated_conv2d.conv2d.module.weight_u primals_3 = self.gated_conv2d.conv2d.module.weight_v primals_1 = self.gated_conv2d.conv2d.module.weight_bar primals_6 = self.gated_conv2d.mask_conv2d.module.bias primals_9 = self.gated_conv2d.mask_conv2d.module.weight_u primals_7 = self.gated_conv2d.mask_conv2d.module.weight_v primals_4 = self.gated_conv2d.mask_conv2d.module.weight_bar primals_8 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
kangzhiq/DeepFillv2_Pytorch
TransposeGatedConv2d
false
10445
[ "MIT" ]
0
9c7ed61b25bb995713f89108b712490737abe1b1
https://github.com/kangzhiq/DeepFillv2_Pytorch/tree/9c7ed61b25bb995713f89108b712490737abe1b1
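Editor's sketch (not part of the dataset): kernels 1 through 5 in this record implement one power-iteration step of SpectralNorm._update_u_v from the source above, and the final fused kernel applies the gating. A hedged plain-PyTorch restatement under the same shapes; helper names are illustrative, and the kernel names in the comments refer to the record above:

import torch
import torch.nn.functional as F

def power_iteration_step(w2d: torch.Tensor, u: torch.Tensor, eps: float = 1e-12):
    # v <- l2normalize(W^T u): triton_per_fused_add_div_linalg_vector_norm_mv_1
    v = torch.mv(w2d.t(), u)
    v = v / (v.norm() + eps)
    # u <- l2normalize(W v): triton_per_fused_div_mv_2 + triton_per_fused_add_div_linalg_vector_norm_3
    u = torch.mv(w2d, v)
    u = u / (u.norm() + eps)
    # sigma = u . (W v): triton_per_fused_dot_4; W / sigma: triton_poi_fused_div_5
    sigma = u.dot(torch.mv(w2d, v))
    return u, v, w2d / sigma

def gated_output(conv_with_bias: torch.Tensor, mask_with_bias: torch.Tensor):
    # triton_poi_fused_convolution_leaky_relu_mul_sigmoid_6 (conv biases already added)
    return F.leaky_relu(conv_with_bias, 0.2) * torch.sigmoid(mask_with_bias)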
NextSentencePrediction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] # Source node to ATen node mapping: # linear => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/t6/ct6t4t4f6df2rswen66qmskrag4dcnuaoxkebk4pssna63yxl3v3.py # Topologically Sorted Source Nodes: [linear, log_softmax], Original ATen: [aten.add, aten._log_softmax] # Source node to ATen node mapping: # linear => add # log_softmax => amax, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) triton_poi_fused__log_softmax_add_1 = async_compile.triton('triton_poi_fused__log_softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp6 = tmp3 + tmp5 tmp10 = tmp7 + tmp9 tmp11 = triton_helpers.maximum(tmp6, tmp10) tmp12 = tmp2 - tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jt/cjtlgs566wh4nqwf6wvcsracaovzta35wbm5vllgfuavp5ebkiq4.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = 
call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [linear, log_softmax], Original ATen: [aten.add, aten._log_softmax] triton_poi_fused__log_softmax_add_1.run(buf1, primals_3, buf2, 32, grid=grid(32), stream=stream0) del primals_3 buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: 
[aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf2, buf3, 32, grid=grid(32), stream=stream0) del buf2 return (buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
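A usage sketch added by the editor (not part of the generated file): it shows how call() can be driven directly with the strided arguments its assert_size_stride guards expect. A CUDA device is assumed, and _example_call is a hypothetical helper introduced only for illustration.
def _example_call():
    # Hypothetical helper, editorial addition; shapes mirror the guards in call().
    from torch._dynamo.testing import rand_strided
    p1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)  # input
    p2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)                # linear weight
    p3 = rand_strided((2,), (1,), device='cuda:0', dtype=torch.float32)                    # linear bias
    out, _, _ = call([p1, p2, p3])
    return out  # (4, 4, 2) log-probabilities over {is_next, is_not_next}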
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.distributions


class NextSentencePrediction(nn.Module):
    """
    2-class classification model : is_next, is_not_next
    """

    def __init__(self, hidden):
        """
        :param hidden: BERT model output size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        return self.softmax(self.linear(x[:, 0]))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden': 4}]
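A minimal usage sketch (an editorial addition, not from the repository); the hyperparameter and input shape are taken from get_init_inputs()/get_inputs() above.
if __name__ == '__main__':
    model = NextSentencePrediction(hidden=4)
    x = torch.rand(4, 4, 4, 4)       # matches get_inputs()
    log_probs = model(x)             # x[:, 0] -> Linear(4, 2) -> LogSoftmax
    assert log_probs.shape == (4, 4, 2)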
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.distributions

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


# Gathers x[:, 0] (the first "token" of each batch element) into a contiguous
# (4, 4, 4) buffer so the linear layer can run as a plain matmul.
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


# Adds the linear bias to both logits and subtracts the per-row maximum:
# the first half of a numerically stable log-softmax over the 2 classes.
@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp6 = tmp3 + tmp5
    tmp10 = tmp7 + tmp9
    tmp11 = triton_helpers.maximum(tmp6, tmp10)
    tmp12 = tmp2 - tmp11
    tl.store(out_ptr0 + x2, tmp12, xmask)


# Finishes the log-softmax: subtracts log(exp(l0) + exp(l1)) from each
# max-shifted logit.
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp6 = tl_math.log(tmp5)
    tmp7 = tmp0 - tmp6
    tl.store(out_ptr0 + x2, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4), (4, 1))
    assert_size_stride(primals_3, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
        triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3,
            buf2, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0)
        del buf1
        triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del buf2
    return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3


class NextSentencePredictionNew(nn.Module):
    """
    2-class classification model : is_next, is_not_next
    """

    def __init__(self, hidden):
        """
        :param hidden: BERT model output size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input_0):
        primals_2 = self.linear.weight
        primals_3 = self.linear.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
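An editorial sanity check (not from the repository): with a CUDA device available, the fused wrapper should match an eager log-softmax computed with its own Linear layer, up to small floating-point differences from the reordered arithmetic; the tolerances below are an assumption.
if __name__ == '__main__':
    fused = NextSentencePredictionNew(hidden=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # Eager reference using the same weights as the fused path.
    ref = torch.log_softmax(fused.linear(x[:, 0]), dim=-1)
    torch.testing.assert_close(fused(x), ref, rtol=1e-5, atol=1e-5)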
greenstar1151/pytorch-benchmark
NextSentencePrediction
false
10446
[ "BSD-3-Clause" ]
0
8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
https://github.com/greenstar1151/pytorch-benchmark/tree/8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
AttentionPool2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), 
xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ct/cctgbe64jgxq3sxjjjqccvq653sunfecfcizp3jcofnl7uiib7wo.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # x_1 => cat # x_2 => add # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %view], -1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {}) triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 17 x3 = (xindex // 17) x4 = xindex % 68 x5 = xindex tmp15 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 17, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x5), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/rq/crqhbpxwwpvdowuqzuzvjdahxv45tx2y4dpxda2rurvr5kralgbn.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 0.7071067811865475), kwargs = {}) triton_poi_fused_mul_2 
= async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 68) x3 = xindex % 68 x1 = (xindex // 17) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (204*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cr/ccrjqh2olqyevqox4t3kpoubo2s2m44cuvtxmf2k3slvdcgmmlcm.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865475), kwargs = {}) triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 68) x3 = xindex % 68 x1 = (xindex // 17) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (68 + x3 + (204*x2)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6p/c6p34hneq7lp7a3tjiwk44lxqe2hzbpdjgbgmanamv35xvznwb2j.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_4 = async_compile.triton('triton_per_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 68 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 
(17*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (17*x0)), tmp11, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wf/cwf36kt6t5p6sv4fjknukcyy4vz6ejfamnreogfggthvquklzkf6.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_3, %primals_4, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17) % 12 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oe/coet3g5rxo652nti4d4ogiwpej2mrpgh4cyidpv6aivegc4mda7s.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_9, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (68*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (17*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uk/cuk32wvqx6lzak6biu2zut26pzpqa3ell4xsxs3qv3cpuuon7kkw.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_9, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 17), (17, 1)) assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (12, ), (1, )) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0) del buf0 del primals_1 del primals_2 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 12, 17), (204, 17, 1)) buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf2, primals_4, buf3, 272, grid=grid(272), stream=stream0) buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf2, primals_4, buf4, 272, grid=grid(272), stream=stream0) buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_4.run(buf5, buf8, 68, 17, grid=grid(68), stream=stream0) del buf5 buf9 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf9, primals_4, 816, grid=grid(816), stream=stream0) del primals_4 buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a], Original ATen: [aten.bmm] extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204, 1, 17), 136), out=buf10) buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] 
triton_poi_fused_convolution_6.run(buf10, buf11, 16, 17, grid=grid(16, 17), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 17), (68, 17, 1)) del buf11 buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf13, primals_6, 272, grid=grid(272), stream=stream0) del primals_6 return (reinterpret_tensor(buf13, (4, 4), (68, 17), 0), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204, 17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 17), (17, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
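As above, an editor-added sketch (not part of the generated file) of invoking the compiled graph directly; the argument shapes mirror the assert_size_stride guards in call(), a CUDA device is assumed, and _example_call is a hypothetical name.
def _example_call():
    # Hypothetical helper, editorial addition.
    from torch._dynamo.testing import rand_strided
    args = [
        rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32),  # feature map
        rand_strided((4, 17), (17, 1), device='cuda:0', dtype=torch.float32),              # positional embedding
        rand_strided((12, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32),         # qkv_proj weight
        rand_strided((12,), (1,), device='cuda:0', dtype=torch.float32),                   # qkv_proj bias
        rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32),          # c_proj weight
        rand_strided((4,), (1,), device='cuda:0', dtype=torch.float32),                    # c_proj bias
    ]
    return call(args)[0]  # pooled (4, 4) output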
import math
import torch
from torch import nn
import torch as th


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')


class QKVAttention(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention +
    input/output heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv, encoder_kv=None):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        if encoder_kv is not None:
            assert encoder_kv.shape[1] == self.n_heads * ch * 2
            ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1)
            k = th.cat([ek, k], dim=-1)
            v = th.cat([ev, v], dim=-1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum('bct,bcs->bts', q * scale, k * scale)
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum('bts,bcs->bct', weight, v)
        return a.reshape(bs, -1, length)


class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(self, spacial_dim: 'int', embed_dim: 'int',
        num_heads_channels: 'int', output_dim: 'int'=None):
        super().__init__()
        self.positional_embedding = nn.Parameter(th.randn(embed_dim,
            spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)
        x = x + self.positional_embedding[None, :, :]
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x[:, :, 0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads_channels': 4}]
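A usage sketch added by the editor; hyperparameters and shapes follow get_init_inputs()/get_inputs() above. Note that scaling q and k by 1/sqrt(sqrt(ch)) each means the product of the two factors gives the standard 1/sqrt(ch) attention scaling.
if __name__ == '__main__':
    pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4)
    feats = torch.rand(4, 4, 4, 4)   # [B, C, H, W]
    pooled = pool(feats)             # attention over [mean token | 16 positions]
    assert pooled.shape == (4, 4)    # one embed_dim vector per batch element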
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch as th

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


# Sums the 16 spatial positions per (batch, channel); the division by 16
# that completes the mean is folded into the next kernel.
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp4, xmask)


# Concatenates the mean token (position 0) with the 16 flattened positions
# and adds the positional embedding, producing the (4, 4, 17) sequence.
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 272
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 17
    x3 = xindex // 17
    x4 = xindex % 68
    x5 = xindex
    tmp15 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = 16.0
    tmp7 = tmp5 / tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 17, tl.int64)
    tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x0)), tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x5, tmp16, xmask)


# Bias-add plus pre-scaling of q by 1/sqrt(sqrt(ch)); with ch = 4 the factor
# is 2**-0.5 = 0.7071..., so q*scale and k*scale together carry the usual
# 1/sqrt(ch) attention scaling.
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 272
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 68
    x3 = xindex % 68
    x1 = xindex // 17 % 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x3 + 204 * x2), xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.7071067811865475
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x4, tmp4, xmask)


# Same as above, but for k (channel offset 68 in the fused qkv buffer).
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 272
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 68
    x3 = xindex % 68
    x1 = xindex // 17 % 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (68 + x3 + 204 * x2), xmask)
    tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.7071067811865475
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x4, tmp4, xmask)


# Row-wise numerically stable softmax over the 17 attention logits.
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 68
    rnumel = 17
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(rmask & xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 17 * x0), tmp11, rmask & xmask)


# In-place bias-add over the full (4, 12, 17) qkv projection so that the v
# slice can be read directly out of it for the second bmm.
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 17 % 12
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


# Transposes the attention output from (4, 17, 4) back to the (4, 4, 17)
# layout expected by the output projection.
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 17
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 68 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 17 * y3), tmp0, xmask & ymask)


# In-place bias-add for the c_proj output.
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 272
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 17 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 17), (17, 1))
    assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1))
    assert_size_stride(primals_4, (12,), (1,))
    assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Spatial sum per (batch, channel); divided by 16 in the next kernel.
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        # [mean token | flattened positions] + positional embedding -> (4, 4, 17).
        buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
        triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
            buf1, 272, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_1
        del primals_2
        # qkv projection (the bias is applied separately in the kernels below).
        buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 12, 17), (204, 17, 1))
        buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
        triton_poi_fused_mul_2[grid(272)](buf2, primals_4, buf3, 272,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
        triton_poi_fused_mul_3[grid(272)](buf2, primals_4, buf4, 272,
            XBLOCK=256, num_warps=4, num_stages=1)
        # Attention logits q^T k, then a row-wise softmax over the 17 tokens.
        buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17),
            0), buf4, out=buf5)
        buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
        triton_per_fused__softmax_4[grid(68)](buf5, buf8, 68, 17, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf5
        # Add the qkv bias in-place so v can be sliced out (offset 136 = 2 * 68).
        buf9 = buf2
        del buf2
        triton_poi_fused_convolution_5[grid(816)](buf9, primals_4, 816,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
        buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32)
        extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204,
            1, 17), 136), out=buf10)
        # Back to (4, 4, 17) layout, then the output projection c_proj + bias.
        buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
        triton_poi_fused_convolution_6[grid(16, 17)](buf10, buf11, 16, 17,
            XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
        buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf12, (4, 4, 17), (68, 17, 1))
        del buf11
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_7[grid(272)](buf13, primals_6, 272,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_6
    # The first element is the pooled (4, 4) output (token 0 of the projection).
    return (reinterpret_tensor(buf13, (4, 4), (68, 17), 0), primals_3,
        primals_5, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 17), (68, 1,
        4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204, 17, 1), 136),
        buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0))


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')


class QKVAttention(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention +
    input/output heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv, encoder_kv=None):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        if encoder_kv is not None:
            assert encoder_kv.shape[1] == self.n_heads * ch * 2
            ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1)
            k = th.cat([ek, k], dim=-1)
            v = th.cat([ev, v], dim=-1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum('bct,bcs->bts', q * scale, k * scale)
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum('bts,bcs->bct', weight, v)
        return a.reshape(bs, -1, length)


class AttentionPool2dNew(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(self, spacial_dim: 'int', embed_dim: 'int',
        num_heads_channels: 'int', output_dim: 'int'=None):
        super().__init__()
        self.positional_embedding = nn.Parameter(th.randn(embed_dim,
            spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, input_0):
        primals_2 = self.positional_embedding
        primals_3 = self.qkv_proj.weight
        primals_4 = self.qkv_proj.bias
        primals_5 = self.c_proj.weight
        primals_6 = self.c_proj.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
litevxx/glid-3
AttentionPool2d
false
10447
[ "MIT" ]
0
d7bd53e671d642b0cbc8af81197170b585c7e624
https://github.com/litevxx/glid-3/tree/d7bd53e671d642b0cbc8af81197170b585c7e624
ConcreteDenseMixture
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5m/c5m6znlu4sybwbvy6htxjumpdsiyi5qroh6vctqzm4q4pynppftx.py # Topologically Sorted Source Nodes: [p, add, log, sub, add_1, log_1, sub_1, add_2, log_2, add_3, sub_2, add_4, log_3, drop_prob, truediv, drop_prob_1, random_tensor, x, x_1], Original ATen: [aten.sigmoid, aten.add, aten.log, aten.rsub, aten.sub, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # drop_prob => sub_3 # drop_prob_1 => sigmoid_1 # log => log # log_1 => log_1 # log_2 => log_2 # log_3 => log_3 # p => sigmoid # random_tensor => sub_4 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # truediv => div # x => mul # x_1 => div_1 # Graph fragment: # %sigmoid : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-07), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-07), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand, 1e-07), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %log_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-07), kwargs = {}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_4,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %log_3), 
kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_3, 0.1), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sub_4), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sub), kwargs = {}) triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp12 = tl.load(in_ptr2 + (x0), xmask) tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-07 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = 1.0 tmp8 = tmp7 - tmp3 tmp9 = tmp8 + tmp4 tmp10 = tl_math.log(tmp9) tmp11 = tmp6 - tmp10 tmp13 = tmp12 + tmp4 tmp14 = tl_math.log(tmp13) tmp15 = tmp11 + tmp14 tmp16 = tmp7 - tmp12 tmp17 = tmp16 + tmp4 tmp18 = tl_math.log(tmp17) tmp19 = tmp15 - tmp18 tmp20 = 10.0 tmp21 = tmp19 * tmp20 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp7 - tmp22 tmp24 = tmp0 * tmp23 tmp25 = tmp24 / tmp8 tl.store(out_ptr0 + (x0), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oo/coow5so5su6vtsogkckryyczitsbdkncnh4l2dnly7t36twt6us6.py # Topologically Sorted Source Nodes: [pow_1, sum_1], Original ATen: [aten.pow, aten.sum] # Source node to ATen node mapping: # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) triton_per_fused_pow_sum_1 = async_compile.triton('triton_per_fused_pow_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/or/corzuezitsvrehhmp6i4nhbwxpmbztc6s6rcir6feyzjzvi6an4v.py # Topologically Sorted Source Nodes: [p, sub, sum_of_square, pow_2, sum_2, sum_of_square_1, mul_1, weights_regularizer, log_4, dropout_regularizer, sub_7, log_5, mul_3, dropout_regularizer_1, dropout_regularizer_2, regularization_1], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div, aten.log] # Source node to ATen node mapping: # dropout_regularizer => mul_2 # dropout_regularizer_1 => add_7 # dropout_regularizer_2 => mul_4 # log_4 => log_4 # log_5 => log_5 # mul_1 => mul_1 # mul_3 => mul_3 # p => sigmoid # pow_2 => pow_2 # regularization_1 => add_8 # sub => sub # sub_7 => sub_7 # sum_2 => sum_2 # sum_of_square => add_5 # sum_of_square_1 => add_6 # weights_regularizer => div_2 # Graph fragment: # %sigmoid : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {}) # %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_4, 2), kwargs = {}) # %sum_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %sum_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 4), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sub), kwargs = {}) # %log_4 : [num_users=2] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %log_4), kwargs = {}) # %sub_7 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_7,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %log_5), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 32.0), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, %mul_4), kwargs = {}) # %div_160 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, %sub), kwargs = {}) triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex 
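    # What this persistent reduction computes (names from the node mapping above):
    #   p                   = sigmoid(p_logit)                           # %sigmoid
    #   sum_of_square       = sum_1 + sum(in_ptr0**2)                    # %add_6
    #   weights_regularizer = 4.0 * sum_of_square / (1 - p)              # %div_2
    #   dropout_regularizer = 32.0 * (p*log(p) + (1 - p)*log(1 - p))     # %mul_4
    #   regularization      = weights_regularizer + dropout_regularizer  # %add_8
    # It additionally stores log(p), (1 - p) and weights_regularizer / (1 - p)
    # (%div_160) so the backward pass can reuse them.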
tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp11 = tl.load(in_ptr2 + (0)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp7 = tl.sigmoid(tmp6) tmp8 = tl_math.log(tmp7) tmp9 = 1.0 tmp10 = tmp9 - tmp7 tmp13 = 0.0 tmp14 = tmp12 + tmp13 tmp15 = tmp14 + tmp4 tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 / tmp10 tmp19 = tmp7 * tmp8 tmp20 = tl_math.log(tmp10) tmp21 = tmp10 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 32.0 tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tmp18 / tmp10 tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None) tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None) tl.store(out_ptr3 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ch/cchcvqeuz2kdsoaw53vwus5rv43qa3wvhkanfzsif6i6cuwympx5.py # Topologically Sorted Source Nodes: [input_2, p_1, add_7, log_6, sub_9, add_8, log_7, sub_10, add_9, log_8, add_10, sub_11, add_11, log_9, drop_prob_2, truediv_2, drop_prob_3, random_tensor_1, x_2, x_3], Original ATen: [aten.tanh, aten.sigmoid, aten.add, aten.log, aten.rsub, aten.sub, aten.div, aten.mul] # Source node to ATen node mapping: # add_10 => add_12 # add_11 => add_13 # add_7 => add_9 # add_8 => add_10 # add_9 => add_11 # drop_prob_2 => sub_12 # drop_prob_3 => sigmoid_3 # input_2 => tanh # log_6 => log_6 # log_7 => log_7 # log_8 => log_8 # log_9 => log_9 # p_1 => sigmoid_2 # random_tensor_1 => sub_13 # sub_10 => sub_10 # sub_11 => sub_11 # sub_9 => sub_9 # truediv_2 => div_3 # x_2 => mul_5 # x_3 => div_4 # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_2,), kwargs = {}) # %sigmoid_2 : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_5,), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid_2, 1e-07), kwargs = {}) # %log_6 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_9,), kwargs = {}) # %sub_9 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_2), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_9, 1e-07), kwargs = {}) # %log_7 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_10,), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_6, %log_7), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_1, 1e-07), kwargs = {}) # %log_8 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_11,), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_10, %log_8), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_1), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_11, 1e-07), kwargs = {}) # %log_9 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_13,), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %log_9), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args 
= (%sub_12, 0.1), kwargs = {}) # %sigmoid_3 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div_3,), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sub_13), kwargs = {}) # %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_5, %sub_9), kwargs = {}) triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3 = async_compile.triton('triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp13 = tl.load(in_ptr2 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp4 = tl.sigmoid(tmp3) tmp5 = 1e-07 tmp6 = tmp4 + tmp5 tmp7 = tl_math.log(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp4 tmp10 = tmp9 + tmp5 tmp11 = tl_math.log(tmp10) tmp12 = tmp7 - tmp11 tmp14 = tmp13 + tmp5 tmp15 = tl_math.log(tmp14) tmp16 = tmp12 + tmp15 tmp17 = tmp8 - tmp13 tmp18 = tmp17 + tmp5 tmp19 = tl_math.log(tmp18) tmp20 = tmp16 - tmp19 tmp21 = 10.0 tmp22 = tmp20 * tmp21 tmp23 = tl.sigmoid(tmp22) tmp24 = tmp8 - tmp23 tmp25 = tmp1 * tmp24 tmp26 = tmp25 / tmp9 tl.store(out_ptr0 + (x0), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ag/cagmdttkouzii56lcbd4mlc6oyw7xiz3hgky42obwbgdidfdzoqs.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_for_fused_4 = async_compile.triton('triton_for_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.foreach( num_warps=8, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'kernel_name': 'triton_for_fused_4', 'mutated_arg_names': [], 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, ) @triton.jit def triton_for_fused_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8): pid = tl.program_id(0) XBLOCK: tl.constexpr = 1024 num_xblocks_0 = tl.cdiv(1, XBLOCK) num_xblocks_1 = num_xblocks_0 + tl.cdiv(1, XBLOCK) num_xblocks_2 = num_xblocks_1 + tl.cdiv(1, XBLOCK) num_xblocks_3 = num_xblocks_2 + tl.cdiv(1, XBLOCK) num_xblocks_4 = num_xblocks_3 + tl.cdiv(1, XBLOCK) num_xblocks_5 = num_xblocks_4 + tl.cdiv(1, XBLOCK) num_xblocks_6 = num_xblocks_5 + tl.cdiv(1, XBLOCK) num_xblocks_7 = num_xblocks_6 + tl.cdiv(1, XBLOCK) num_xblocks_8 = num_xblocks_7 + tl.cdiv(1, XBLOCK) if pid < num_xblocks_0: pid_offset = pid xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp1, None) elif pid < num_xblocks_1: pid_offset = pid - num_xblocks_0 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp2 = tl.load(in_ptr1 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tl.store(out_ptr1 + (tl.full([XBLOCK], 0, tl.int32)), tmp3, None) elif pid < num_xblocks_2: pid_offset = pid - num_xblocks_1 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tl.store(out_ptr2 + (tl.full([XBLOCK], 0, tl.int32)), tmp5, None) elif pid < num_xblocks_3: pid_offset = pid - num_xblocks_2 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp6 = tl.load(in_ptr3 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tl.store(out_ptr3 + (tl.full([XBLOCK], 0, tl.int32)), tmp7, None) elif pid < num_xblocks_4: pid_offset = pid - num_xblocks_3 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp8 = tl.load(in_ptr4 + (0)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tl.store(out_ptr4 + (tl.full([XBLOCK], 0, tl.int32)), tmp9, None) elif pid < 
num_xblocks_5: pid_offset = pid - num_xblocks_4 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp10 = tl.load(in_ptr5 + (0)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tl.store(out_ptr5 + (tl.full([XBLOCK], 0, tl.int32)), tmp11, None) elif pid < num_xblocks_6: pid_offset = pid - num_xblocks_5 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp12 = tl.load(in_ptr6 + (0)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tl.store(out_ptr6 + (tl.full([XBLOCK], 0, tl.int32)), tmp13, None) elif pid < num_xblocks_7: pid_offset = pid - num_xblocks_6 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp14 = tl.load(in_ptr7 + (0)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tl.store(out_ptr7 + (tl.full([XBLOCK], 0, tl.int32)), tmp15, None) elif pid < num_xblocks_8: pid_offset = pid - num_xblocks_7 xnumel = 1 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp16 = tl.load(in_ptr8 + (0)) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tl.store(out_ptr8 + (tl.full([XBLOCK], 0, tl.int32)), tmp17, None) else: pass ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zh/czh2ghogdlf4qx4mbbij2czxblnwgpba7hf4peek3nrfz3ke65y7.py # Topologically Sorted Source Nodes: [sigmoid_24], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid_24 => sigmoid_6 # Graph fragment: # %sigmoid_6 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_12,), kwargs = {}) triton_poi_fused_sigmoid_5 = async_compile.triton('triton_poi_fused_sigmoid_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 9 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # 
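# ---------------------------------------------------------------------------
# Reference sketch (not part of the generated output): the pointwise kernels
# triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0 and its tanh variant
# triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3 fuse a
# relaxed-Bernoulli ("concrete") dropout mask with the activation. The helpers
# below are a minimal eager-mode equivalent; the function and argument names
# are assumptions, while eps = 1e-07 and temperature = 0.1 (the `* 10.0`
# inside the kernels) are read off the generated constants.
# ---------------------------------------------------------------------------
import torch

def concrete_dropout_reference(x, p_logit, u, eps=1e-07, temperature=0.1):
    # u is the uniform noise that appears as %rand_* in the graph,
    # e.g. u = torch.rand_like(x)
    p = torch.sigmoid(p_logit)  # learned drop probability (%sigmoid)
    # relaxed-Bernoulli logit: log_6 - log_7 + log_8 - log_9 in the graph
    drop_logit = (torch.log(p + eps) - torch.log(1 - p + eps)
                  + torch.log(u + eps) - torch.log(1 - u + eps))
    drop_prob = torch.sigmoid(drop_logit / temperature)
    random_tensor = 1 - drop_prob          # soft keep-mask (%sub_4 / %sub_13)
    # (the ..._tanh_3 variant applies torch.tanh(x) before masking)
    return x * random_tensor / (1 - p)     # rescale so E[output] matches input

def concrete_dropout_regularizer(p_logit, sum_of_square,
                                 weight_factor=4.0, entropy_factor=32.0):
    # mirrors the ..._pow_rsub_sigmoid_sum_2 reduction: a weight-decay term
    # scaled by 1/(1-p) plus the Bernoulli entropy term scaled by 32.0
    p = torch.sigmoid(p_logit)
    weights_reg = weight_factor * sum_of_square / (1 - p)
    dropout_reg = entropy_factor * (p * torch.log(p) + (1 - p) * torch.log(1 - p))
    return weights_reg + dropout_reg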
kernel path: runs/run_shard_8/inductor_cache/rd/crd7zpxjaubl3xwr6s3dxqbevflh4ofguejlgr4a32gpmzolkemf.py # Topologically Sorted Source Nodes: [sub_99, pow_23, sum_23, sum_of_square_22, pow_24, sum_24, sum_of_square_23, mul_45, weights_regularizer_11, log_70, dropout_regularizer_33, sub_106, log_71, mul_47, dropout_regularizer_34, dropout_regularizer_35, regularization_12], Original ATen: [aten.rsub, aten.pow, aten.sum, aten.add, aten.mul, aten.div, aten.log] # Source node to ATen node mapping: # dropout_regularizer_33 => mul_57 # dropout_regularizer_34 => add_106 # dropout_regularizer_35 => mul_59 # log_70 => log_70 # log_71 => log_71 # mul_45 => mul_56 # mul_47 => mul_58 # pow_23 => pow_23 # pow_24 => pow_24 # regularization_12 => add_107 # sub_106 => sub_106 # sub_99 => sub_91 # sum_23 => sum_23 # sum_24 => sum_24 # sum_of_square_22 => add_104 # sum_of_square_23 => add_105 # weights_regularizer_11 => div_35 # Graph fragment: # %sub_91 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_8), kwargs = {}) # %pow_23 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_36, 2), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_23,), kwargs = {}) # %add_104 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_23, 0), kwargs = {}) # %pow_24 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_37, 2), kwargs = {}) # %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_24,), kwargs = {}) # %add_105 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_104, %sum_24), kwargs = {}) # %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_105, 4), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_56, %sub_91), kwargs = {}) # %log_70 : [num_users=2] = call_function[target=torch.ops.aten.log.default](args = (%getitem_8,), kwargs = {}) # %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_8, %log_70), kwargs = {}) # %sub_106 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %getitem_8), kwargs = {}) # %log_71 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_106,), kwargs = {}) # %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_106, %log_71), kwargs = {}) # %add_106 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_57, %mul_58), kwargs = {}) # %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_106, 32.0), kwargs = {}) # %add_107 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_35, %mul_59), kwargs = {}) # %div_39 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, %sub_91), kwargs = {}) triton_per_fused_add_div_log_mul_pow_rsub_sum_6 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_rsub_sum_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 
4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_rsub_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mul_pow_rsub_sum_6(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (8)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp12 = tl.load(in_ptr2 + (0)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp7 = tl_math.log(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp6 tmp10 = 0.0 tmp11 = tmp4 + tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 / tmp9 tmp19 = tmp6 * tmp7 tmp20 = tl_math.log(tmp9) tmp21 = tmp9 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 32.0 tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tmp18 / tmp9 tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None) tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None) tl.store(out_ptr3 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tl/ctlayqq4hegcuvwscwjlkk3jvo4lradqxpndemm6kbslgl7aoahd.py # Topologically Sorted Source Nodes: [pow_8, sum_8], Original ATen: [aten.pow, aten.sum] # Source node to ATen node mapping: # pow_8 => pow_8 # sum_8 => sum_8 # Graph fragment: # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_13, 2), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_8,), kwargs = {}) triton_per_fused_pow_sum_7 = async_compile.triton('triton_per_fused_pow_sum_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, 
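    # rnumel is 3 for this reduction but the lane count is padded to RBLOCK = 4,
    # so the body guards the tail with `rmask = rindex < rnumel` and zero-fills
    # masked lanes via tl.where before tl.sum — the usual pattern when the
    # reduced length is smaller than the power-of-two block.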
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sum_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_sum_7(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 3 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yr/cyrcdmzymqmgy3uy4er6nkcwmlte6efc3shfpandhquk2eyvzs4u.py # Topologically Sorted Source Nodes: [pow_11, sum_11], Original ATen: [aten.pow, aten.sum] # Source node to ATen node mapping: # pow_11 => pow_11 # sum_11 => sum_11 # Graph fragment: # %pow_11 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_18, 2), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_11,), kwargs = {}) triton_per_fused_pow_sum_8 = async_compile.triton('triton_per_fused_pow_sum_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sum_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
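    # Same sum-of-squares pattern as triton_per_fused_pow_sum_7, just wider:
    # 24 elements of primals_18 reduced in a single RBLOCK = 32 lane group,
    # again tail-masked with `rmask = rindex < rnumel`.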
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_sum_8(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/42/c42hwesai5nocof66i3vgu2h4yx5eyjkkhuw46lm4gp55cnlv5ry.py # Topologically Sorted Source Nodes: [sub_45, sum_of_square_10, pow_12, sum_12, sum_of_square_11, mul_21, weights_regularizer_5], Original ATen: [aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div] # Source node to ATen node mapping: # mul_21 => mul_23 # pow_12 => pow_12 # sub_45 => sub_43 # sum_12 => sum_12 # sum_of_square_10 => add_50 # sum_of_square_11 => add_51 # weights_regularizer_5 => div_14 # Graph fragment: # %sub_43 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {}) # %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_11, 0), kwargs = {}) # %pow_12 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_19, 2), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_12,), kwargs = {}) # %add_51 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_50, %sum_12), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_51, 4), kwargs = {}) # %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_23, %sub_43), kwargs = {}) # %div_114 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, %sub_43), kwargs = {}) triton_per_fused_add_div_mul_pow_rsub_sum_9 = async_compile.triton('triton_per_fused_add_div_mul_pow_rsub_sum_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_rsub_sum_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
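    # This reduction builds one layer's weights regularizer: s = sum(primals_19**2)
    # is added to the incoming sum_11, scaled by 4.0 and divided by (1 - p), with p
    # read from offset 2 of the packed drop-probability vector (%getitem_2). It
    # stores the raw sum s and the doubly-divided backward term (%div_114); the
    # regularizer total itself is assembled later, in
    # triton_per_fused_add_div_mul_pow_rsub_sum_12.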
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_pow_rsub_sum_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 6 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1]) tmp13 = tl.load(in_ptr2 + (2)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp8 = 0.0 tmp9 = tmp7 + tmp8 tmp10 = tmp9 + tmp5 tmp11 = 4.0 tmp12 = tmp10 * tmp11 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp12 / tmp16 tmp18 = tmp17 / tmp16 tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None) tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ri/cridgry2xm245a23u75d65thgtjwllmoy3elpz33w7a4pwtrfnu6.py # Topologically Sorted Source Nodes: [sub_72, sum_of_square_16, pow_18, sum_18, sum_of_square_17, mul_33, weights_regularizer_8], Original ATen: [aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div] # Source node to ATen node mapping: # mul_33 => mul_35 # pow_18 => pow_18 # sub_72 => sub_67 # sum_18 => sum_18 # sum_of_square_16 => add_77 # sum_of_square_17 => add_78 # weights_regularizer_8 => div_20 # Graph fragment: # %sub_67 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_5), kwargs = {}) # %add_77 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_17, 0), kwargs = {}) # %pow_18 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_28, 2), kwargs = {}) # %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_18,), kwargs = {}) # %add_78 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_77, %sum_18), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_78, 4), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_35, %sub_67), kwargs = {}) # %div_90 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, %sub_67), kwargs = {}) triton_per_fused_add_div_mul_pow_rsub_sum_10 = async_compile.triton('triton_per_fused_add_div_mul_pow_rsub_sum_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
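    # Structurally identical to triton_per_fused_add_div_mul_pow_rsub_sum_9 above:
    # the same masked 6-element sum of squares (here over primals_28) and the same
    # * 4 / (1 - p) / (1 - p) algebra, with the drop probability read from
    # offset 5 of the packed p-vector instead of offset 2 (%div_90).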
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_rsub_sum_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_pow_rsub_sum_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 6 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1]) tmp13 = tl.load(in_ptr2 + (5)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp8 = 0.0 tmp9 = tmp7 + tmp8 tmp10 = tmp9 + tmp5 tmp11 = 4.0 tmp12 = tmp10 * tmp11 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp12 / tmp16 tmp18 = tmp17 / tmp16 tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None) tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zs/czs4gufsgrnyhhzlfgvfufobsmm6xraz7oweb72an5ufo7bjroyt.py # Topologically Sorted Source Nodes: [pow_19, sum_19], Original ATen: [aten.pow, aten.sum] # Source node to ATen node mapping: # pow_19 => pow_19 # sum_19 => sum_19 # Graph fragment: # %pow_19 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_30, 2), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_19,), kwargs = {}) triton_per_fused_pow_sum_11 = async_compile.triton('triton_per_fused_pow_sum_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sum_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
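    # A third sum-of-squares helper: primals_30 has exactly 4 elements, so
    # rnumel == RBLOCK == 4 and the tail mask degenerates to all-true.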
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_sum_11(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/dj/cdjdhwz5va5vtqe2nrqby3vvq5krnefikusczyguzrhiyp74dddp.py # Topologically Sorted Source Nodes: [sub_27, pow_7, sum_7, sum_of_square_6, sum_of_square_7, mul_13, weights_regularizer_3, sub_36, pow_9, sum_9, sum_of_square_8, sum_of_square_9, mul_17, weights_regularizer_4, sub_54, pow_13, sum_13, sum_of_square_12, sum_of_square_13, mul_25, weights_regularizer_6, sub_63, pow_15, sum_15, sum_of_square_14, sum_of_square_15, mul_29, weights_regularizer_7, sum_25], Original ATen: [aten.rsub, aten.pow, aten.sum, aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # mul_13 => mul_15 # mul_17 => mul_19 # mul_25 => mul_27 # mul_29 => mul_31 # pow_13 => pow_13 # pow_15 => pow_15 # pow_7 => pow_7 # pow_9 => pow_9 # sub_27 => sub_27 # sub_36 => sub_35 # sub_54 => sub_51 # sub_63 => sub_59 # sum_13 => sum_13 # sum_15 => sum_15 # sum_25 => sum_25 # sum_7 => sum_7 # sum_9 => sum_9 # sum_of_square_12 => add_59 # sum_of_square_13 => add_60 # sum_of_square_14 => add_68 # sum_of_square_15 => add_69 # sum_of_square_6 => add_32 # sum_of_square_7 => add_33 # sum_of_square_8 => add_41 # sum_of_square_9 => add_42 # weights_regularizer_3 => div_10 # weights_regularizer_4 => div_12 # weights_regularizer_6 => div_16 # weights_regularizer_7 => div_18 # Graph fragment: # %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%empty, %view_3, 0, 0), kwargs = {}) # %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %view_7, 0, 1), kwargs = {}) # %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %view_11, 0, 2), kwargs = {}) # %sub_27 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_12, 2), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_7,), kwargs = {}) # %add_32 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_7, 0), kwargs = {}) # %add_33 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_32, %sum_8), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_33, 4), kwargs = {}) # %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = 
(%mul_15, %sub_27), kwargs = {}) # %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %view_13, 0, 3), kwargs = {}) # %sub_35 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_1), kwargs = {}) # %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_15, 2), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_9,), kwargs = {}) # %add_41 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, 0), kwargs = {}) # %add_42 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_41, %sum_10), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_42, 4), kwargs = {}) # %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_19, %sub_35), kwargs = {}) # %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %view_14, 0, 4), kwargs = {}) # %select_scatter_default_5 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_4, %view_15, 0, 5), kwargs = {}) # %sub_51 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_3), kwargs = {}) # %pow_13 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_21, 2), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_13,), kwargs = {}) # %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_13, 0), kwargs = {}) # %add_60 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_59, %sum_14), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_60, 4), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_27, %sub_51), kwargs = {}) # %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %view_16, 0, 6), kwargs = {}) # %sub_59 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_4), kwargs = {}) # %pow_15 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_24, 2), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_15,), kwargs = {}) # %add_68 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_15, 0), kwargs = {}) # %add_69 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_68, %sum_16), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_69, 4), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_31, %sub_59), kwargs = {}) # %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_6, %view_17, 0, 7), kwargs = {}) # %select_scatter_default_8 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_7, %view_18, 0, 8), kwargs = {}) # %select_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = 
(%select_scatter_default_8, %view_19, 0, 9), kwargs = {}) # %select_scatter_default_10 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_9, %view_20, 0, 10), kwargs = {}) # %select_scatter_default_11 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_10, %view_49, 0, 11), kwargs = {}) # %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_scatter_default_11,), kwargs = {}) # %div_98 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, %sub_59), kwargs = {}) # %div_106 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, %sub_51), kwargs = {}) # %div_122 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, %sub_35), kwargs = {}) # %div_130 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, %sub_27), kwargs = {}) triton_per_fused_add_div_mul_pow_rsub_sum_12 = async_compile.triton('triton_per_fused_add_div_mul_pow_rsub_sum_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: 'i32', 27: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {26: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25), equal_to_1=(26,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_rsub_sum_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 36, 'num_reduction': 5, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_pow_rsub_sum_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 12 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < 
rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp6 = tl.load(in_ptr1 + (r0), rmask, other=0.0) tmp12 = tl.load(in_ptr2 + (r0), rmask, other=0.0) tmp18 = tl.load(in_ptr3 + (r0), rmask, other=0.0) tmp29 = tl.load(in_ptr4 + (0)) tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK]) tmp34 = tl.load(in_ptr5 + (0)) tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp49 = tl.load(in_ptr6 + (0)) tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp53 = tl.load(in_ptr7 + (0)) tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK]) tmp57 = tl.load(in_ptr8 + (0)) tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK]) tmp66 = tl.load(in_ptr9 + (0)) tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = tl.load(in_ptr10 + (0)) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK]) tmp73 = tl.load(in_ptr5 + (2)) tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp87 = tl.load(in_ptr11 + (0)) tmp88 = tl.broadcast_to(tmp87, [XBLOCK, RBLOCK]) tmp91 = tl.load(in_ptr5 + (1)) tmp92 = tl.broadcast_to(tmp91, [XBLOCK, RBLOCK]) tmp107 = tl.load(in_ptr12 + (0)) tmp108 = tl.broadcast_to(tmp107, [XBLOCK, RBLOCK]) tmp111 = tl.load(in_ptr5 + (4)) tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK]) tmp125 = tl.load(in_ptr13 + (0)) tmp126 = tl.broadcast_to(tmp125, [XBLOCK, RBLOCK]) tmp129 = tl.load(in_ptr5 + (3)) tmp130 = tl.broadcast_to(tmp129, [XBLOCK, RBLOCK]) tmp144 = tl.load(in_ptr14 + (0)) tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = tl.load(in_ptr15 + (0)) tmp148 = tl.broadcast_to(tmp147, [XBLOCK, RBLOCK]) tmp152 = tl.load(in_ptr5 + (6)) tmp153 = tl.broadcast_to(tmp152, [XBLOCK, RBLOCK]) tmp165 = tl.load(in_ptr16 + (0)) tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK]) tmp168 = tl.load(in_ptr17 + (0)) tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK]) tmp172 = tl.load(in_ptr5 + (5)) tmp173 = tl.broadcast_to(tmp172, [XBLOCK, RBLOCK]) tmp187 = tl.load(in_ptr18 + (0)) tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp191 = tl.load(in_ptr19 + (0)) tmp192 = tl.broadcast_to(tmp191, [XBLOCK, RBLOCK]) tmp194 = tl.load(in_ptr20 + (0)) tmp195 = tl.broadcast_to(tmp194, [XBLOCK, RBLOCK]) tmp199 = tl.load(in_ptr5 + (7)) tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK]) tmp216 = tl.broadcast_to(tmp107, [XBLOCK, 1]) tmp219 = tl.broadcast_to(tmp111, [XBLOCK, 1]) tmp223 = tl.broadcast_to(tmp125, [XBLOCK, 1]) tmp226 = tl.broadcast_to(tmp129, [XBLOCK, 1]) tmp230 = tl.broadcast_to(tmp87, [XBLOCK, 1]) tmp233 = tl.broadcast_to(tmp91, [XBLOCK, 1]) tmp237 = tl.broadcast_to(tmp29, [XBLOCK, 1]) tmp240 = tl.broadcast_to(tmp34, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp7 = tmp6 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(rmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(rmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = r0 tmp25 = tl.full([1, 1], 3, tl.int32) tmp26 = tmp24 == tmp25 tmp27 = 0.0 tmp28 = tmp23 + tmp27 tmp31 = tmp28 + tmp30 tmp32 = 4.0 tmp33 = tmp31 * tmp32 tmp36 = 1.0 tmp37 = tmp36 - tmp35 tmp38 = tmp33 / tmp37 tmp39 = tl_math.log(tmp35) tmp40 = tmp35 * tmp39 tmp41 = tl_math.log(tmp37) tmp42 = tmp37 * tmp41 tmp43 = tmp40 + tmp42 tmp44 = 32.0 tmp45 = tmp43 * tmp44 tmp46 = tmp38 + tmp45 tmp47 = tl.full([1, 1], 2, tl.int32) tmp48 = tmp24 == tmp47 tmp51 
= tl.full([1, 1], 1, tl.int32) tmp52 = tmp24 == tmp51 tmp55 = tl.full([1, 1], 0, tl.int32) tmp56 = tmp24 == tmp55 tmp59 = float("nan") tmp60 = tl.where(tmp56, tmp58, tmp59) tmp61 = tl.where(tmp52, tmp54, tmp60) tmp62 = tl.where(tmp48, tmp50, tmp61) tmp63 = tl.where(tmp26, tmp46, tmp62) tmp64 = tl.full([1, 1], 5, tl.int32) tmp65 = tmp24 == tmp64 tmp68 = tmp67 + tmp27 tmp71 = tmp68 + tmp70 tmp72 = tmp71 * tmp32 tmp75 = tmp36 - tmp74 tmp76 = tmp72 / tmp75 tmp77 = tl_math.log(tmp74) tmp78 = tmp74 * tmp77 tmp79 = tl_math.log(tmp75) tmp80 = tmp75 * tmp79 tmp81 = tmp78 + tmp80 tmp82 = tmp81 * tmp44 tmp83 = tmp76 + tmp82 tmp84 = tl.full([1, 1], 4, tl.int32) tmp85 = tmp24 == tmp84 tmp86 = tmp11 + tmp27 tmp89 = tmp86 + tmp88 tmp90 = tmp89 * tmp32 tmp93 = tmp36 - tmp92 tmp94 = tmp90 / tmp93 tmp95 = tl_math.log(tmp92) tmp96 = tmp92 * tmp95 tmp97 = tl_math.log(tmp93) tmp98 = tmp93 * tmp97 tmp99 = tmp96 + tmp98 tmp100 = tmp99 * tmp44 tmp101 = tmp94 + tmp100 tmp102 = tl.where(tmp85, tmp101, tmp63) tmp103 = tl.where(tmp65, tmp83, tmp102) tmp104 = tl.full([1, 1], 7, tl.int32) tmp105 = tmp24 == tmp104 tmp106 = tmp17 + tmp27 tmp109 = tmp106 + tmp108 tmp110 = tmp109 * tmp32 tmp113 = tmp36 - tmp112 tmp114 = tmp110 / tmp113 tmp115 = tl_math.log(tmp112) tmp116 = tmp112 * tmp115 tmp117 = tl_math.log(tmp113) tmp118 = tmp113 * tmp117 tmp119 = tmp116 + tmp118 tmp120 = tmp119 * tmp44 tmp121 = tmp114 + tmp120 tmp122 = tl.full([1, 1], 6, tl.int32) tmp123 = tmp24 == tmp122 tmp124 = tmp5 + tmp27 tmp127 = tmp124 + tmp126 tmp128 = tmp127 * tmp32 tmp131 = tmp36 - tmp130 tmp132 = tmp128 / tmp131 tmp133 = tl_math.log(tmp130) tmp134 = tmp130 * tmp133 tmp135 = tl_math.log(tmp131) tmp136 = tmp131 * tmp135 tmp137 = tmp134 + tmp136 tmp138 = tmp137 * tmp44 tmp139 = tmp132 + tmp138 tmp140 = tl.where(tmp123, tmp139, tmp103) tmp141 = tl.where(tmp105, tmp121, tmp140) tmp142 = tl.full([1, 1], 9, tl.int32) tmp143 = tmp24 == tmp142 tmp146 = tmp145 + tmp27 tmp149 = tmp148 * tmp148 tmp150 = tmp146 + tmp149 tmp151 = tmp150 * tmp32 tmp154 = tmp36 - tmp153 tmp155 = tmp151 / tmp154 tmp156 = tl_math.log(tmp153) tmp157 = tmp153 * tmp156 tmp158 = tl_math.log(tmp154) tmp159 = tmp154 * tmp158 tmp160 = tmp157 + tmp159 tmp161 = tmp160 * tmp44 tmp162 = tmp155 + tmp161 tmp163 = tl.full([1, 1], 8, tl.int32) tmp164 = tmp24 == tmp163 tmp167 = tmp166 + tmp27 tmp170 = tmp167 + tmp169 tmp171 = tmp170 * tmp32 tmp174 = tmp36 - tmp173 tmp175 = tmp171 / tmp174 tmp176 = tl_math.log(tmp173) tmp177 = tmp173 * tmp176 tmp178 = tl_math.log(tmp174) tmp179 = tmp174 * tmp178 tmp180 = tmp177 + tmp179 tmp181 = tmp180 * tmp44 tmp182 = tmp175 + tmp181 tmp183 = tl.where(tmp164, tmp182, tmp141) tmp184 = tl.where(tmp143, tmp162, tmp183) tmp185 = tl.full([1, 1], 11, tl.int32) tmp186 = tmp24 == tmp185 tmp189 = tl.full([1, 1], 10, tl.int32) tmp190 = tmp24 == tmp189 tmp193 = tmp192 + tmp27 tmp196 = tmp195 * tmp195 tmp197 = tmp193 + tmp196 tmp198 = tmp197 * tmp32 tmp201 = tmp36 - tmp200 tmp202 = tmp198 / tmp201 tmp203 = tl_math.log(tmp200) tmp204 = tmp200 * tmp203 tmp205 = tl_math.log(tmp201) tmp206 = tmp201 * tmp205 tmp207 = tmp204 + tmp206 tmp208 = tmp207 * tmp44 tmp209 = tmp202 + tmp208 tmp210 = tl.where(tmp190, tmp209, tmp184) tmp211 = tl.where(tmp186, tmp188, tmp210) tmp212 = tl.broadcast_to(tmp211, [XBLOCK, RBLOCK]) tmp214 = tl.where(rmask, tmp212, 0) tmp215 = tl.sum(tmp214, 1)[:, None] tmp217 = tmp106 + tmp216 tmp218 = tmp217 * tmp32 tmp220 = tmp36 - tmp219 tmp221 = tmp218 / tmp220 tmp222 = tmp221 / tmp220 tmp224 = tmp124 + tmp223 tmp225 = tmp224 * tmp32 tmp227 = tmp36 - 
tmp226 tmp228 = tmp225 / tmp227 tmp229 = tmp228 / tmp227 tmp231 = tmp86 + tmp230 tmp232 = tmp231 * tmp32 tmp234 = tmp36 - tmp233 tmp235 = tmp232 / tmp234 tmp236 = tmp235 / tmp234 tmp238 = tmp28 + tmp237 tmp239 = tmp238 * tmp32 tmp241 = tmp36 - tmp240 tmp242 = tmp239 / tmp241 tmp243 = tmp242 / tmp241 tl.store(out_ptr5 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp222, None) tl.store(out_ptr6 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp229, None) tl.store(out_ptr7 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp236, None) tl.store(out_ptr8 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp243, None) tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp215, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oq/coqmsyjhloguyy7mzq6xqosrkboymzslyzp7xjwrvac32ifw5u2p.py # Topologically Sorted Source Nodes: [add_21, log_18, sub_27, add_22, log_19, sub_28, add_23, log_20, add_24, sub_29, add_25, log_21, drop_prob_6, truediv_6], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_21 => add_27 # add_22 => add_28 # add_23 => add_29 # add_24 => add_30 # add_25 => add_31 # drop_prob_6 => sub_30 # log_18 => log_18 # log_19 => log_19 # log_20 => log_20 # log_21 => log_21 # sub_27 => sub_27 # sub_28 => sub_28 # sub_29 => sub_29 # truediv_6 => div_9 # Graph fragment: # %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-07), kwargs = {}) # %log_18 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_27,), kwargs = {}) # %sub_27 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem), kwargs = {}) # %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_27, 1e-07), kwargs = {}) # %log_19 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_28,), kwargs = {}) # %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_18, %log_19), kwargs = {}) # %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_3, 1e-07), kwargs = {}) # %log_20 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_29,), kwargs = {}) # %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_28, %log_20), kwargs = {}) # %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_3), kwargs = {}) # %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_29, 1e-07), kwargs = {}) # %log_21 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_31,), kwargs = {}) # %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_30, %log_21), kwargs = {}) # %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_30, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_13 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/pv/cpvyxfqatwcudsrjlgbints7pg627ivbkfsarevqyxgr44ckrgyi.py # Topologically Sorted Source Nodes: [add_28, log_24, sub_36, add_29, log_25, sub_37, add_30, log_26, add_31, sub_38, add_32, log_27, drop_prob_8, truediv_8], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_28 => add_36 # add_29 => add_37 # add_30 => add_38 # add_31 => add_39 # add_32 => add_40 # drop_prob_8 => sub_38 # log_24 => log_24 # log_25 => log_25 # log_26 => log_26 # log_27 => log_27 # sub_36 => sub_35 # sub_37 => sub_36 # sub_38 => sub_37 # truediv_8 => div_11 # Graph fragment: # %add_36 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, 1e-07), kwargs = {}) # %log_24 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_36,), kwargs = {}) # %sub_35 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_1), kwargs = {}) # %add_37 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_35, 1e-07), kwargs = {}) # %log_25 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_37,), kwargs = {}) # %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_24, %log_25), kwargs = {}) # %add_38 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_4, 1e-07), kwargs = {}) # %log_26 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_38,), kwargs = {}) # %add_39 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_36, %log_26), kwargs = {}) # %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_4), kwargs = {}) # %add_40 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_37, 1e-07), kwargs = 
{}) # %log_27 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_40,), kwargs = {}) # %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_39, %log_27), kwargs = {}) # %div_11 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_38, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_14 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/fm/cfmk2f3ravvmga7irng47525qqbr67cjqqfl2llqxdaidd4slbjd.py # Topologically Sorted Source Nodes: [add_35, log_30, sub_45, add_36, log_31, sub_46, add_37, log_32, add_38, sub_47, add_39, log_33, drop_prob_10, truediv_10], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_35 => add_45 # add_36 => add_46 # add_37 => add_47 # add_38 => add_48 # add_39 => add_49 # drop_prob_10 => sub_46 # log_30 => log_30 # log_31 => log_31 # log_32 => log_32 # log_33 => log_33 # sub_45 => sub_43 # sub_46 => sub_44 # sub_47 => sub_45 # truediv_10 => div_13 # Graph fragment: # %add_45 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-07), kwargs = {}) # %log_30 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_45,), kwargs = {}) # %sub_43 : [num_users=4] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {}) # %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_43, 1e-07), kwargs = {}) # %log_31 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_46,), kwargs = {}) # %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_30, %log_31), kwargs = {}) # %add_47 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_5, 1e-07), kwargs = {}) # %log_32 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_47,), kwargs = {}) # %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_44, %log_32), kwargs = {}) # %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_5), kwargs = {}) # %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_45, 1e-07), kwargs = {}) # %log_33 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_49,), kwargs = {}) # %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_48, %log_33), kwargs = {}) # %div_13 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_46, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_15 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 
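    # (annotation) tmp19 is the relaxed-Bernoulli logit scaled by 1/temperature:
    # (log(p+eps) - log(1-p+eps) + log(u+eps) - log(1-u+eps)) * 10.0, the
    # division by 0.1 having been strength-reduced to a multiply.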
tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/di/cdirwn2ukwxdh2a3nou24fj4l2rchs36wnfpwym5xdmekuijp226.py # Topologically Sorted Source Nodes: [add_42, log_36, sub_54, add_43, log_37, sub_55, add_44, log_38, add_45, sub_56, add_46, log_39, drop_prob_12, truediv_12], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_42 => add_54 # add_43 => add_55 # add_44 => add_56 # add_45 => add_57 # add_46 => add_58 # drop_prob_12 => sub_54 # log_36 => log_36 # log_37 => log_37 # log_38 => log_38 # log_39 => log_39 # sub_54 => sub_51 # sub_55 => sub_52 # sub_56 => sub_53 # truediv_12 => div_15 # Graph fragment: # %add_54 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_3, 1e-07), kwargs = {}) # %log_36 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_54,), kwargs = {}) # %sub_51 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_3), kwargs = {}) # %add_55 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_51, 1e-07), kwargs = {}) # %log_37 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_55,), kwargs = {}) # %sub_52 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_36, %log_37), kwargs = {}) # %add_56 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_6, 1e-07), kwargs = {}) # %log_38 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_56,), kwargs = {}) # %add_57 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_52, %log_38), kwargs = {}) # %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_6), kwargs = {}) # %add_58 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_53, 1e-07), kwargs = {}) # %log_39 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_58,), kwargs = {}) # %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_57, %log_39), kwargs = {}) # %div_15 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_54, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_16 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
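        # (annotation) inductor_meta continues below; kernels _13.._21 differ
        # only in the constant index loaded from in_ptr0, i.e. which scalar
        # dropout probability they read -- the arithmetic is identical.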
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zl/czldcaqd3p7spunsfhj5tn3qiabkrv5zzmrv2muwuhegjchnaqx6.py # Topologically Sorted Source Nodes: [add_49, log_42, sub_63, add_50, log_43, sub_64, add_51, log_44, add_52, sub_65, add_53, log_45, drop_prob_14, truediv_14], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_49 => add_63 # add_50 => add_64 # add_51 => add_65 # add_52 => add_66 # add_53 => add_67 # drop_prob_14 => sub_62 # log_42 => log_42 # log_43 => log_43 # log_44 => log_44 # log_45 => log_45 # sub_63 => sub_59 # sub_64 => sub_60 # sub_65 => sub_61 # truediv_14 => div_17 # Graph fragment: # %add_63 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-07), kwargs = {}) # %log_42 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_63,), kwargs = {}) # %sub_59 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_4), kwargs = {}) # %add_64 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_59, 1e-07), kwargs = {}) # %log_43 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_64,), kwargs = {}) # %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_42, %log_43), kwargs = {}) # %add_65 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_7, 1e-07), kwargs = {}) # %log_44 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_65,), kwargs = {}) # %add_66 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_60, %log_44), kwargs = {}) # %sub_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_7), kwargs = {}) # %add_67 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_61, 1e-07), kwargs = {}) # %log_45 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_67,), kwargs = {}) # %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_66, %log_45), kwargs = {}) # %div_17 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_62, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_17 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ve/cve2ojp6dw2uoupp6dvcy2z2p7x3ft3gwihdyamzwsqxcdh44vmc.py # Topologically Sorted Source Nodes: [add_56, log_48, sub_72, add_57, log_49, sub_73, add_58, log_50, add_59, sub_74, add_60, log_51, drop_prob_16, truediv_16], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_56 => add_72 # add_57 => add_73 # add_58 => add_74 # add_59 => add_75 # add_60 => add_76 # drop_prob_16 => sub_70 # log_48 => log_48 # log_49 => log_49 # log_50 => log_50 # log_51 => log_51 # sub_72 => sub_67 # sub_73 => sub_68 # sub_74 => sub_69 # truediv_16 => div_19 # Graph fragment: # %add_72 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-07), kwargs = {}) # %log_48 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_72,), kwargs = {}) # %sub_67 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_5), kwargs = {}) # %add_73 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_67, 1e-07), kwargs = {}) # %log_49 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_73,), kwargs = {}) # %sub_68 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_48, %log_49), kwargs = {}) # %add_74 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_8, 1e-07), kwargs = {}) # %log_50 : [num_users=1] = 
call_function[target=torch.ops.aten.log.default](args = (%add_74,), kwargs = {}) # %add_75 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_68, %log_50), kwargs = {}) # %sub_69 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_8), kwargs = {}) # %add_76 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_69, 1e-07), kwargs = {}) # %log_51 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_76,), kwargs = {}) # %sub_70 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_75, %log_51), kwargs = {}) # %div_19 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_70, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_18 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (5)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/dv/cdvczblwwzl7vx22llaqgu7l27h5ryk2igyz7o7vzyj2h2wpuhbr.py # Topologically Sorted Source Nodes: [add_63, log_54, sub_81, add_64, log_55, sub_82, add_65, log_56, add_66, sub_83, add_67, log_57, drop_prob_18, truediv_18], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_63 => add_81 # add_64 => add_82 # add_65 => add_83 # add_66 => add_84 # add_67 => add_85 # drop_prob_18 => sub_78 # log_54 => log_54 # 
log_55 => log_55 # log_56 => log_56 # log_57 => log_57 # sub_81 => sub_75 # sub_82 => sub_76 # sub_83 => sub_77 # truediv_18 => div_21 # Graph fragment: # %add_81 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-07), kwargs = {}) # %log_54 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_81,), kwargs = {}) # %sub_75 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_6), kwargs = {}) # %add_82 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_75, 1e-07), kwargs = {}) # %log_55 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_82,), kwargs = {}) # %sub_76 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_54, %log_55), kwargs = {}) # %add_83 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_9, 1e-07), kwargs = {}) # %log_56 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_83,), kwargs = {}) # %add_84 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_76, %log_56), kwargs = {}) # %sub_77 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_9), kwargs = {}) # %add_85 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_77, 1e-07), kwargs = {}) # %log_57 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_85,), kwargs = {}) # %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_84, %log_57), kwargs = {}) # %div_21 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_78, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_19 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (6)) tmp1 = tl.broadcast_to(tmp0, 
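        # (annotation) the scalar probability loaded from in_ptr0 + 6 is
        # broadcast over the whole block, so a single p value gates every
        # element this program instance touches.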
[XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/bu/cbukwzy456kwupizkedavdwjwq2yotsyoyelboy57r6kiqkhuzrp.py # Topologically Sorted Source Nodes: [add_70, log_60, sub_90, add_71, log_61, sub_91, add_72, log_62, add_73, sub_92, add_74, log_63, drop_prob_20, truediv_20], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_70 => add_90 # add_71 => add_91 # add_72 => add_92 # add_73 => add_93 # add_74 => add_94 # drop_prob_20 => sub_86 # log_60 => log_60 # log_61 => log_61 # log_62 => log_62 # log_63 => log_63 # sub_90 => sub_83 # sub_91 => sub_84 # sub_92 => sub_85 # truediv_20 => div_23 # Graph fragment: # %add_90 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_7, 1e-07), kwargs = {}) # %log_60 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_90,), kwargs = {}) # %sub_83 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_7), kwargs = {}) # %add_91 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_83, 1e-07), kwargs = {}) # %log_61 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_91,), kwargs = {}) # %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_60, %log_61), kwargs = {}) # %add_92 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_10, 1e-07), kwargs = {}) # %log_62 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_92,), kwargs = {}) # %add_93 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_84, %log_62), kwargs = {}) # %sub_85 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_10), kwargs = {}) # %add_94 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_85, 1e-07), kwargs = {}) # %log_63 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_94,), kwargs = {}) # %sub_86 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_93, %log_63), kwargs = {}) # %div_23 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_86, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_20 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), 
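        # (annotation) divisible_by_16 records which argument positions
        # Inductor treats as 16-divisible, an alignment hint that permits
        # vectorized memory access.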
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (7)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7b/c7biwoo3uunmfxmxwnjem6pmhwqf3h4tnj4qkv7a4p7ino7xfhim.py # Topologically Sorted Source Nodes: [add_77, log_66, sub_99, add_78, log_67, sub_100, add_79, log_68, add_80, sub_101, add_81, log_69, drop_prob_22, truediv_22], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] # Source node to ATen node mapping: # add_77 => add_99 # add_78 => add_100 # add_79 => add_101 # add_80 => add_102 # add_81 => add_103 # drop_prob_22 => sub_94 # log_66 => log_66 # log_67 => log_67 # log_68 => log_68 # log_69 => log_69 # sub_100 => sub_92 # sub_101 => sub_93 # sub_99 => sub_91 # truediv_22 => div_25 # Graph fragment: # %add_99 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-07), kwargs = {}) # %log_66 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_99,), kwargs = {}) # %sub_91 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_8), kwargs = {}) # %add_100 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_91, 1e-07), kwargs = {}) # %log_67 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_100,), kwargs = {}) # %sub_92 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_66, %log_67), kwargs = {}) # %add_101 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand_11, 1e-07), kwargs = {}) # %log_68 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_101,), kwargs = {}) # %add_102 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_92, %log_68), kwargs = {}) # %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand_11), kwargs = {}) # %add_103 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_93, 1e-07), kwargs = {}) # %log_69 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_103,), kwargs = {}) # %sub_94 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_102, 
%log_69), kwargs = {}) # %div_25 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_94, 0.1), kwargs = {}) triton_poi_fused_add_div_log_rsub_sub_21 = async_compile.triton('triton_poi_fused_add_div_log_rsub_sub_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_rsub_sub_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (8)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ve/cve6bbmu3zgzd2l4enod45renr7qtdhz7ereyugmqisymx3ejt34.py # Topologically Sorted Source Nodes: [sigmoid_25], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid_25 => sigmoid_7 # Graph fragment: # %sigmoid_7 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_21,), kwargs = {}) triton_poi_fused_sigmoid_22 = async_compile.triton('triton_poi_fused_sigmoid_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': 
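        # (annotation) an empty constants dict follows; this pointwise kernel
        # only applies tl.sigmoid over a flat 2304-element view, plausibly the
        # concatenated (36, 4, 4, 4) drop-logit buffer (36 * 64 = 2304).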
{}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_22(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/x5/cx5pxsptdjvwp3olpp3xz4ixq7wqxopnurob5t2kxqz2tkrh33gd.py # Topologically Sorted Source Nodes: [input_6, sub_27, sub_36, sub_45, sub_54, sub_63, sub_72, sub_81, sub_90, sub_99, random_tensor_10, x_20, x_21, random_tensor_9, x_18, x_19, random_tensor_8, x_16, x_17, random_tensor_7, x_14, x_15, random_tensor_6, x_12, x_13, random_tensor_5, x_10, x_11, random_tensor_4, x_8, x_9, random_tensor_3, x_6, x_7, random_tensor_11, x_22, x_23], Original ATen: [aten.tanh, aten.rsub, aten.mul, aten.div] # Source node to ATen node mapping: # input_6 => tanh_2 # random_tensor_10 => sub_95 # random_tensor_11 => sub_103 # random_tensor_3 => sub_102 # random_tensor_4 => sub_101 # random_tensor_5 => sub_100 # random_tensor_6 => sub_99 # random_tensor_7 => sub_98 # random_tensor_8 => sub_97 # random_tensor_9 => sub_96 # sub_27 => sub_27 # sub_36 => sub_35 # sub_45 => sub_43 # sub_54 => sub_51 # sub_63 => sub_59 # sub_72 => sub_67 # sub_81 => sub_75 # sub_90 => sub_83 # sub_99 => sub_91 # x_10 => mul_52 # x_11 => div_31 # x_12 => mul_51 # x_13 => div_30 # x_14 => mul_50 # x_15 => div_29 # x_16 => mul_49 # x_17 => div_28 # x_18 => mul_48 # x_19 => div_27 # x_20 => mul_47 # x_21 => div_26 # x_22 => mul_55 # x_23 => div_34 # x_6 => mul_54 # x_7 => div_33 # x_8 => mul_53 # x_9 => div_32 # Graph fragment: # %tanh_2 : [num_users=9] = call_function[target=torch.ops.aten.tanh.default](args = (%view_10,), kwargs = {}) # %sub_27 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem), kwargs = {}) # %sub_35 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_1), kwargs = {}) # %sub_43 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {}) # %sub_51 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_3), kwargs = {}) # %sub_59 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_4), kwargs = {}) # %sub_67 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_5), kwargs = {}) # %sub_75 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_6), kwargs = {}) # %sub_83 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_7), kwargs = {}) # %sub_91 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_8), kwargs = {}) # %sub_95 : 
[num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_16), kwargs = {}) # %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_95), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_47, %sub_83), kwargs = {}) # %sub_96 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_15), kwargs = {}) # %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_96), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_48, %sub_75), kwargs = {}) # %sub_97 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_14), kwargs = {}) # %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_97), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_49, %sub_67), kwargs = {}) # %sub_98 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_13), kwargs = {}) # %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_98), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_50, %sub_59), kwargs = {}) # %sub_99 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_12), kwargs = {}) # %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_99), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_51, %sub_51), kwargs = {}) # %sub_100 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_11), kwargs = {}) # %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_100), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_52, %sub_43), kwargs = {}) # %sub_101 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_10), kwargs = {}) # %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_101), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_53, %sub_35), kwargs = {}) # %sub_102 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_9), kwargs = {}) # %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_102), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_54, %sub_27), kwargs = {}) # %sub_103 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_17), kwargs = {}) # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %sub_103), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_55, %sub_91), kwargs = {}) triton_poi_fused_div_mul_rsub_tanh_23 = async_compile.triton('triton_poi_fused_div_mul_rsub_tanh_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_rsub_tanh_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 19, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_rsub_tanh_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1792 + x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (7)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp10 = tl.load(in_ptr0 + (1536 + x0), xmask) tmp13 = tl.load(in_ptr2 + (6)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (1280 + x0), xmask) tmp20 = tl.load(in_ptr2 + (5)) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp24 = tl.load(in_ptr0 + (1024 + x0), xmask) tmp27 = tl.load(in_ptr2 + (4)) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp31 = tl.load(in_ptr0 + (768 + x0), xmask) tmp34 = tl.load(in_ptr2 + (3)) tmp35 = tl.broadcast_to(tmp34, [XBLOCK]) tmp38 = tl.load(in_ptr0 + (512 + x0), xmask) tmp41 = tl.load(in_ptr2 + (2)) tmp42 = tl.broadcast_to(tmp41, [XBLOCK]) tmp45 = tl.load(in_ptr0 + (256 + x0), xmask) tmp48 = tl.load(in_ptr2 + (1)) tmp49 = tl.broadcast_to(tmp48, [XBLOCK]) tmp52 = tl.load(in_ptr0 + (x0), xmask) tmp55 = tl.load(in_ptr2 + (0)) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp59 = tl.load(in_ptr0 + (2048 + x0), xmask) tmp62 = tl.load(in_ptr2 + (8)) tmp63 = tl.broadcast_to(tmp62, [XBLOCK]) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = libdevice.tanh(tmp3) tmp5 = tmp4 * tmp2 tmp8 = tmp1 - tmp7 tmp9 = tmp5 / tmp8 tmp11 = tmp1 - tmp10 tmp12 = tmp4 * tmp11 tmp15 = tmp1 - tmp14 tmp16 = tmp12 / tmp15 tmp18 = tmp1 - tmp17 tmp19 = tmp4 * tmp18 tmp22 = tmp1 - tmp21 tmp23 = tmp19 / tmp22 tmp25 = tmp1 - tmp24 tmp26 = tmp4 * tmp25 tmp29 = tmp1 - tmp28 tmp30 = tmp26 / tmp29 tmp32 = tmp1 - tmp31 tmp33 = tmp4 * tmp32 tmp36 = tmp1 - tmp35 tmp37 = tmp33 / tmp36 tmp39 = tmp1 - tmp38 tmp40 = tmp4 * tmp39 tmp43 = tmp1 - tmp42 tmp44 = tmp40 / tmp43 tmp46 = tmp1 - tmp45 tmp47 = tmp4 * tmp46 tmp50 = tmp1 - tmp49 tmp51 = tmp47 / tmp50 tmp53 = tmp1 - tmp52 tmp54 = tmp4 * 
tmp53 tmp57 = tmp1 - tmp56 tmp58 = tmp54 / tmp57 tmp60 = tmp1 - tmp59 tmp61 = tmp4 * tmp60 tmp64 = tmp1 - tmp63 tmp65 = tmp61 / tmp64 tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr1 + (x0), tmp9, xmask) tl.store(out_ptr2 + (x0), tmp11, xmask) tl.store(out_ptr3 + (x0), tmp16, xmask) tl.store(out_ptr4 + (x0), tmp18, xmask) tl.store(out_ptr5 + (x0), tmp23, xmask) tl.store(out_ptr6 + (x0), tmp25, xmask) tl.store(out_ptr7 + (x0), tmp30, xmask) tl.store(out_ptr8 + (x0), tmp32, xmask) tl.store(out_ptr9 + (x0), tmp37, xmask) tl.store(out_ptr10 + (x0), tmp39, xmask) tl.store(out_ptr11 + (x0), tmp44, xmask) tl.store(out_ptr12 + (x0), tmp46, xmask) tl.store(out_ptr13 + (x0), tmp51, xmask) tl.store(out_ptr14 + (x0), tmp53, xmask) tl.store(out_ptr15 + (x0), tmp58, xmask) tl.store(out_ptr16 + (x0), tmp60, xmask) tl.store(out_ptr17 + (x0), tmp65, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (1, ), (1, )) assert_size_stride(primals_12, (3, 4), (4, 1)) assert_size_stride(primals_13, (3, ), (1, )) assert_size_stride(primals_14, (1, ), (1, )) assert_size_stride(primals_15, (3, 4), (4, 1)) assert_size_stride(primals_16, (3, ), (1, )) assert_size_stride(primals_17, (1, ), (1, )) assert_size_stride(primals_18, (6, 4), (4, 1)) assert_size_stride(primals_19, (6, ), (1, )) assert_size_stride(primals_20, (1, ), (1, )) assert_size_stride(primals_21, (3, 4), (4, 1)) assert_size_stride(primals_22, (3, ), (1, )) assert_size_stride(primals_23, (1, ), (1, )) assert_size_stride(primals_24, (3, 4), (4, 1)) assert_size_stride(primals_25, (3, ), (1, )) assert_size_stride(primals_26, (1, ), (1, )) assert_size_stride(primals_27, (6, 4), (4, 1)) assert_size_stride(primals_28, (6, ), (1, )) assert_size_stride(primals_29, (1, ), (1, )) assert_size_stride(primals_30, (1, 4), (4, 1)) assert_size_stride(primals_31, (1, ), (1, )) assert_size_stride(primals_32, (1, ), (1, )) assert_size_stride(primals_33, (1, 4), (4, 1)) assert_size_stride(primals_34, (1, ), (1, )) assert_size_stride(primals_35, (1, ), (1, )) assert_size_stride(primals_36, (1, 4), (4, 1)) assert_size_stride(primals_37, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [unif_noise], Original ATen: [aten.rand_like] buf1 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p, add, log, sub, add_1, log_1, sub_1, add_2, log_2, add_3, 
sub_2, add_4, log_3, drop_prob, truediv, drop_prob_1, random_tensor, x, x_1], Original ATen: [aten.sigmoid, aten.add, aten.log, aten.rsub, aten.sub, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0.run(primals_1, primals_2, buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) buf5 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_1.run(primals_3, buf5, 1, 16, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((1, ), (1, ), torch.float32) buf8 = empty_strided_cuda((1, ), (1, ), torch.float32) buf9 = empty_strided_cuda((1, ), (1, ), torch.float32) buf131 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [p, sub, sum_of_square, pow_2, sum_2, sum_of_square_1, mul_1, weights_regularizer, log_4, dropout_regularizer, sub_7, log_5, mul_3, dropout_regularizer_1, dropout_regularizer_2, regularization_1], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div, aten.log] triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2.run(primals_4, primals_2, buf5, buf7, buf8, buf9, buf131, 1, 4, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: [unif_noise_1], Original ATen: [aten.rand_like] buf10 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_2, p_1, add_7, log_6, sub_9, add_8, log_7, sub_10, add_9, log_8, add_10, sub_11, add_11, log_9, drop_prob_2, truediv_2, drop_prob_3, random_tensor_1, x_2, x_3], Original ATen: [aten.tanh, aten.sigmoid, aten.add, aten.log, aten.rsub, aten.sub, aten.div, aten.mul] triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3.run(buf4, primals_5, buf11, buf12, 256, grid=grid(256), stream=stream0) buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) buf14 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [pow_3, sum_3], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_1.run(primals_6, buf14, 1, 16, grid=grid(1), stream=stream0) buf16 = empty_strided_cuda((1, ), (1, ), torch.float32) buf17 = empty_strided_cuda((1, ), (1, ), torch.float32) buf18 = empty_strided_cuda((1, ), (1, ), torch.float32) buf130 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [p_1, sub_9, sum_of_square_2, pow_4, sum_4, sum_of_square_3, mul_5, weights_regularizer_1, log_10, dropout_regularizer_3, sub_16, log_11, mul_7, dropout_regularizer_4, dropout_regularizer_5, regularization_2], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div, aten.log] triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2.run(primals_7, primals_5, buf14, buf16, buf17, buf18, buf130, 1, 4, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: 
[unif_noise_2], Original ATen: [aten.rand_like] buf19 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_4, p_2, add_14, log_12, sub_18, add_15, log_13, sub_19, add_16, log_14, add_17, sub_20, add_18, log_15, drop_prob_4, truediv_4, drop_prob_5, random_tensor_2, x_4, x_5], Original ATen: [aten.tanh, aten.sigmoid, aten.add, aten.log, aten.rsub, aten.sub, aten.div, aten.mul] triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3.run(buf13, primals_8, buf20, buf21, 256, grid=grid(256), stream=stream0) buf22 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf21, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf22) buf23 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [pow_5, sum_5], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_1.run(primals_9, buf23, 1, 16, grid=grid(1), stream=stream0) buf25 = empty_strided_cuda((1, ), (1, ), torch.float32) buf26 = empty_strided_cuda((1, ), (1, ), torch.float32) buf27 = empty_strided_cuda((1, ), (1, ), torch.float32) buf129 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [p_2, sub_18, sum_of_square_4, pow_6, sum_6, sum_of_square_5, mul_9, weights_regularizer_2, log_16, dropout_regularizer_6, sub_25, log_17, mul_11, dropout_regularizer_7, dropout_regularizer_8, regularization_3], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div, aten.log] triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2.run(primals_10, primals_8, buf23, buf25, buf26, buf27, buf129, 1, 4, grid=grid(1), stream=stream0) buf37 = empty_strided_cuda((9, ), (1, ), torch.float32) buf28 = reinterpret_tensor(buf37, (1, ), (1, ), 0) # alias buf29 = reinterpret_tensor(buf37, (1, ), (1, ), 1) # alias buf30 = reinterpret_tensor(buf37, (1, ), (1, ), 2) # alias buf31 = reinterpret_tensor(buf37, (1, ), (1, ), 3) # alias buf32 = reinterpret_tensor(buf37, (1, ), (1, ), 4) # alias buf33 = reinterpret_tensor(buf37, (1, ), (1, ), 5) # alias buf34 = reinterpret_tensor(buf37, (1, ), (1, ), 6) # alias buf35 = reinterpret_tensor(buf37, (1, ), (1, ), 7) # alias buf36 = reinterpret_tensor(buf37, (1, ), (1, ), 8) # alias # Unsorted Source Nodes: [], Original ATen: [] triton_for_fused_4.run(primals_11, primals_14, primals_17, primals_20, primals_23, primals_26, primals_29, primals_32, primals_35, buf28, buf29, buf30, buf31, buf32, buf33, buf34, buf35, buf36, grid=(9, 1, 1), stream=stream0) del primals_11 del primals_14 del primals_17 del primals_20 del primals_23 del primals_26 del primals_29 del primals_32 del primals_35 buf38 = empty_strided_cuda((9, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_24], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_5.run(buf37, buf38, 9, grid=grid(9), stream=stream0) del buf28 del buf29 del buf30 del buf31 del buf32 del buf33 del buf34 del buf35 del buf36 del buf37 # Topologically Sorted Source Nodes: [unif_noise_3], Original ATen: [aten.rand_like] buf39 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf40 = buf39 del buf39 buf117 = reinterpret_tensor(buf23, (1, ), (1, ), 0); del 
buf23 # reuse buf118 = empty_strided_cuda((1, ), (1, ), torch.float32) buf119 = empty_strided_cuda((1, ), (1, ), torch.float32) buf122 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub_99, pow_23, sum_23, sum_of_square_22, pow_24, sum_24, sum_of_square_23, mul_45, weights_regularizer_11, log_70, dropout_regularizer_33, sub_106, log_71, mul_47, dropout_regularizer_34, dropout_regularizer_35, regularization_12], Original ATen: [aten.rsub, aten.pow, aten.sum, aten.add, aten.mul, aten.div, aten.log] triton_per_fused_add_div_log_mul_pow_rsub_sum_6.run(primals_36, buf38, primals_37, buf117, buf118, buf119, buf122, 1, 4, grid=grid(1), stream=stream0) buf42 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_8, sum_8], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_7.run(primals_13, buf42, 1, 3, grid=grid(1), stream=stream0) buf47 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_10, sum_10], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_7.run(primals_16, buf47, 1, 3, grid=grid(1), stream=stream0) buf50 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_11, sum_11], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_8.run(primals_18, buf50, 1, 24, grid=grid(1), stream=stream0) buf51 = empty_strided_cuda((), (), torch.float32) buf126 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub_45, sum_of_square_10, pow_12, sum_12, sum_of_square_11, mul_21, weights_regularizer_5], Original ATen: [aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div] triton_per_fused_add_div_mul_pow_rsub_sum_9.run(primals_19, buf50, buf38, buf51, buf126, 1, 6, grid=grid(1), stream=stream0) buf56 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_14, sum_14], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_7.run(primals_22, buf56, 1, 3, grid=grid(1), stream=stream0) buf60 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_16, sum_16], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_7.run(primals_25, buf60, 1, 3, grid=grid(1), stream=stream0) buf64 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_17, sum_17], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_8.run(primals_27, buf64, 1, 24, grid=grid(1), stream=stream0) buf65 = empty_strided_cuda((), (), torch.float32) buf123 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub_72, sum_of_square_16, pow_18, sum_18, sum_of_square_17, mul_33, weights_regularizer_8], Original ATen: [aten.rsub, aten.add, aten.pow, aten.sum, aten.mul, aten.div] triton_per_fused_add_div_mul_pow_rsub_sum_10.run(primals_28, buf64, buf38, buf65, buf123, 1, 6, grid=grid(1), stream=stream0) buf68 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_19, sum_19], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_11.run(primals_30, buf68, 1, 4, grid=grid(1), stream=stream0) buf72 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [pow_21, sum_21], Original ATen: [aten.pow, aten.sum] triton_per_fused_pow_sum_11.run(primals_33, buf72, 1, 4, grid=grid(1), stream=stream0) buf121 = empty_strided_cuda((), (), torch.float32) buf124 = empty_strided_cuda((1, ), (1, ), torch.float32) buf125 = empty_strided_cuda((1, ), (1, ), 
torch.float32) buf127 = empty_strided_cuda((1, ), (1, ), torch.float32) buf128 = empty_strided_cuda((1, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub_27, pow_7, sum_7, sum_of_square_6, sum_of_square_7, mul_13, weights_regularizer_3, sub_36, pow_9, sum_9, sum_of_square_8, sum_of_square_9, mul_17, weights_regularizer_4, sub_54, pow_13, sum_13, sum_of_square_12, sum_of_square_13, mul_25, weights_regularizer_6, sub_63, pow_15, sum_15, sum_of_square_14, sum_of_square_15, mul_29, weights_regularizer_7, sum_25], Original ATen: [aten.rsub, aten.pow, aten.sum, aten.add, aten.mul, aten.div] triton_per_fused_add_div_mul_pow_rsub_sum_12.run(primals_21, primals_15, primals_24, primals_12, buf42, buf38, buf27, buf18, buf9, buf50, buf51, buf47, buf60, buf56, buf68, primals_31, buf64, buf65, buf119, buf72, primals_34, buf121, buf124, buf125, buf127, buf128, 1, 12, grid=grid(1), stream=stream0) del buf119 del buf18 del buf27 del buf42 del buf47 del buf50 del buf51 del buf56 del buf60 del buf64 del buf65 del buf68 del buf72 del buf9 # Topologically Sorted Source Nodes: [unif_noise_4], Original ATen: [aten.rand_like] buf44 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf45 = buf44 del buf44 # Topologically Sorted Source Nodes: [unif_noise_5], Original ATen: [aten.rand_like] buf48 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf49 = buf48 del buf48 # Topologically Sorted Source Nodes: [unif_noise_6], Original ATen: [aten.rand_like] buf53 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf54 = buf53 del buf53 # Topologically Sorted Source Nodes: [unif_noise_7], Original ATen: [aten.rand_like] buf57 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf58 = buf57 del buf57 # Topologically Sorted Source Nodes: [unif_noise_8], Original ATen: [aten.rand_like] buf62 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf63 = buf62 del buf62 # Topologically Sorted Source Nodes: [unif_noise_9], Original ATen: [aten.rand_like] buf66 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf67 = buf66 del buf66 # Topologically Sorted Source Nodes: [unif_noise_10], Original ATen: [aten.rand_like] buf70 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf71 = buf70 del buf70 # Topologically Sorted Source Nodes: [unif_noise_11], Original ATen: [aten.rand_like] buf73 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf74 = buf73 del buf73 buf84 = empty_strided_cuda((36, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf75 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [add_21, log_18, sub_27, add_22, log_19, sub_28, add_23, log_20, add_24, sub_29, add_25, log_21, drop_prob_6, truediv_6], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_13.run(buf38, buf40, buf75, 256, grid=grid(256), stream=stream0) buf76 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 256) # alias # Topologically Sorted Source Nodes: [add_28, log_24, sub_36, 
add_29, log_25, sub_37, add_30, log_26, add_31, sub_38, add_32, log_27, drop_prob_8, truediv_8], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_14.run(buf38, buf45, buf76, 256, grid=grid(256), stream=stream0) buf77 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 512) # alias # Topologically Sorted Source Nodes: [add_35, log_30, sub_45, add_36, log_31, sub_46, add_37, log_32, add_38, sub_47, add_39, log_33, drop_prob_10, truediv_10], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_15.run(buf38, buf49, buf77, 256, grid=grid(256), stream=stream0) buf78 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 768) # alias # Topologically Sorted Source Nodes: [add_42, log_36, sub_54, add_43, log_37, sub_55, add_44, log_38, add_45, sub_56, add_46, log_39, drop_prob_12, truediv_12], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_16.run(buf38, buf54, buf78, 256, grid=grid(256), stream=stream0) buf79 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1024) # alias # Topologically Sorted Source Nodes: [add_49, log_42, sub_63, add_50, log_43, sub_64, add_51, log_44, add_52, sub_65, add_53, log_45, drop_prob_14, truediv_14], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_17.run(buf38, buf58, buf79, 256, grid=grid(256), stream=stream0) buf80 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1280) # alias # Topologically Sorted Source Nodes: [add_56, log_48, sub_72, add_57, log_49, sub_73, add_58, log_50, add_59, sub_74, add_60, log_51, drop_prob_16, truediv_16], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_18.run(buf38, buf63, buf80, 256, grid=grid(256), stream=stream0) buf81 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1536) # alias # Topologically Sorted Source Nodes: [add_63, log_54, sub_81, add_64, log_55, sub_82, add_65, log_56, add_66, sub_83, add_67, log_57, drop_prob_18, truediv_18], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_19.run(buf38, buf67, buf81, 256, grid=grid(256), stream=stream0) buf82 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1792) # alias # Topologically Sorted Source Nodes: [add_70, log_60, sub_90, add_71, log_61, sub_91, add_72, log_62, add_73, sub_92, add_74, log_63, drop_prob_20, truediv_20], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_20.run(buf38, buf71, buf82, 256, grid=grid(256), stream=stream0) buf83 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 2048) # alias # Topologically Sorted Source Nodes: [add_77, log_66, sub_99, add_78, log_67, sub_100, add_79, log_68, add_80, sub_101, add_81, log_69, drop_prob_22, truediv_22], Original ATen: [aten.add, aten.log, aten.rsub, aten.sub, aten.div] triton_poi_fused_add_div_log_rsub_sub_21.run(buf38, buf74, buf83, 256, grid=grid(256), stream=stream0) buf85 = empty_strided_cuda((9, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_25], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_22.run(buf84, buf85, 2304, grid=grid(2304), stream=stream0) del buf75 del buf76 del buf77 del buf78 del buf79 del buf80 del buf81 del buf82 del buf83 del buf84 buf86 = buf74; del buf74 # reuse buf87 = buf71; del buf71 # reuse buf90 = 
buf67; del buf67 # reuse buf91 = buf63; del buf63 # reuse buf94 = buf58; del buf58 # reuse buf95 = buf54; del buf54 # reuse buf97 = buf49; del buf49 # reuse buf98 = buf45; del buf45 # reuse buf100 = buf40; del buf40 # reuse buf101 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf103 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf104 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf106 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf107 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf109 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf110 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf112 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf113 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_6, sub_27, sub_36, sub_45, sub_54, sub_63, sub_72, sub_81, sub_90, sub_99, random_tensor_10, x_20, x_21, random_tensor_9, x_18, x_19, random_tensor_8, x_16, x_17, random_tensor_7, x_14, x_15, random_tensor_6, x_12, x_13, random_tensor_5, x_10, x_11, random_tensor_4, x_8, x_9, random_tensor_3, x_6, x_7, random_tensor_11, x_22, x_23], Original ATen: [aten.tanh, aten.rsub, aten.mul, aten.div] triton_poi_fused_div_mul_rsub_tanh_23.run(buf85, buf22, buf38, buf86, buf87, buf90, buf91, buf94, buf95, buf97, buf98, buf100, buf101, buf103, buf104, buf106, buf107, buf109, buf110, buf112, buf113, 256, grid=grid(256), stream=stream0) buf89 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_34, reinterpret_tensor(buf87, (64, 4), (4, 1), 0), reinterpret_tensor(primals_33, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf89) buf93 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_31, reinterpret_tensor(buf91, (64, 4), (4, 1), 0), reinterpret_tensor(primals_30, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf93) buf96 = empty_strided_cuda((64, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_28, reinterpret_tensor(buf95, (64, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 6), (1, 4), 0), alpha=1, beta=1, out=buf96) buf99 = empty_strided_cuda((64, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_25, reinterpret_tensor(buf98, (64, 4), (4, 1), 0), reinterpret_tensor(primals_24, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf99) buf102 = empty_strided_cuda((64, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_22, reinterpret_tensor(buf101, (64, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf102) buf105 = empty_strided_cuda((64, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_19, reinterpret_tensor(buf104, (64, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 6), (1, 4), 0), alpha=1, beta=1, out=buf105) buf108 = empty_strided_cuda((64, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, reinterpret_tensor(buf107, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_15, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf108) buf111 = empty_strided_cuda((64, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, reinterpret_tensor(buf110, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf111) buf115 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_37, reinterpret_tensor(buf113, (64, 4), (4, 1), 0), reinterpret_tensor(primals_36, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf115) return (reinterpret_tensor(buf111, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf108, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf105, (4, 4, 4, 6), (96, 24, 6, 1), 0), reinterpret_tensor(buf102, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf99, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf96, (4, 4, 4, 6), (96, 24, 6, 1), 0), reinterpret_tensor(buf93, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf89, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf115, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf121, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_12, primals_13, primals_15, primals_16, primals_18, primals_19, primals_21, primals_22, primals_24, primals_25, primals_27, primals_28, primals_30, primals_31, primals_33, primals_34, primals_36, primals_37, buf2, buf3, buf4, buf7, buf8, buf11, buf12, buf13, buf16, buf17, buf20, buf21, buf22, buf25, buf26, buf38, reinterpret_tensor(buf38, (1, ), (1, ), 0), reinterpret_tensor(buf38, (1, ), (1, ), 1), reinterpret_tensor(buf38, (1, ), (1, ), 2), reinterpret_tensor(buf38, (1, ), (1, ), 3), reinterpret_tensor(buf38, (1, ), (1, ), 4), reinterpret_tensor(buf38, (1, ), (1, ), 5), reinterpret_tensor(buf38, (1, ), (1, ), 6), reinterpret_tensor(buf38, (1, ), (1, ), 7), reinterpret_tensor(buf38, (1, ), (1, ), 8), buf85, buf86, buf87, buf90, buf91, buf94, buf95, buf97, buf98, buf100, buf101, buf103, buf104, buf106, buf107, buf109, buf110, buf112, buf113, buf117, buf118, buf122, buf123, buf124, buf125, buf126, buf127, buf128, buf129, buf130, buf131, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) 
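    # Layout note: each ConcreteDropout block contributes its (1,) p_logit
    # followed by the wrapped nn.Linear's weight and bias -- e.g. primals_14
    # above is the p_logit for conc_drop_logvar, and primals_15/16 below are
    # linear4_logvar's (3, 4) weight and (3,) bias.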
primals_15 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((6, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((6, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
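# The fused kernels above all realize the same concrete-dropout relaxation,
#     drop_prob = sigmoid((log(p + eps) - log(1 - p + eps)
#                          + log(u + eps) - log(1 - u + eps)) / temp),
# with eps = 1e-07 and temp = 0.1 -- which is why the literal 10.0 (= 1/temp)
# appears in the Triton kernel bodies. Below is a minimal eager-mode sketch for
# cross-checking a single dropout block against the compiled output; the
# function name and variables are ours, not part of the generated code.
def concrete_dropout_reference(x, p_logit, u, eps=1e-07, temp=0.1):
    p = torch.sigmoid(p_logit)
    drop_prob = (torch.log(p + eps) - torch.log(1 - p + eps)
                 + torch.log(u + eps) - torch.log(1 - u + eps))
    drop_prob = torch.sigmoid(drop_prob / temp)
    # Scale by the retained mask and renormalize by the retain probability,
    # matching triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0.
    return x * (1 - drop_prob) / (1 - p)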
import torch
import numpy as np
from torch import nn


class ConcreteDropout(nn.Module):

    def __init__(self, weight_regularizer=1e-06, dropout_regularizer=1e-05,
                 init_min=0.1, init_max=0.1):
        super(ConcreteDropout, self).__init__()
        self.weight_regularizer = weight_regularizer
        self.dropout_regularizer = dropout_regularizer
        init_min = np.log(init_min) - np.log(1.0 - init_min)
        init_max = np.log(init_max) - np.log(1.0 - init_max)
        self.p_logit = nn.Parameter(torch.empty(1).uniform_(init_min,
            init_max))

    def forward(self, x, layer):
        p = torch.sigmoid(self.p_logit)
        out = layer(self._concrete_dropout(x, p))
        sum_of_square = 0
        for param in layer.parameters():
            sum_of_square += torch.sum(torch.pow(param, 2))
        weights_regularizer = self.weight_regularizer * sum_of_square / (1 - p)
        dropout_regularizer = p * torch.log(p)
        dropout_regularizer += (1.0 - p) * torch.log(1.0 - p)
        input_dimensionality = x[0].numel()
        dropout_regularizer *= self.dropout_regularizer * input_dimensionality
        regularization = weights_regularizer + dropout_regularizer
        return out, regularization

    def _concrete_dropout(self, x, p):
        eps = 1e-07
        temp = 0.1
        unif_noise = torch.rand_like(x)
        drop_prob = (torch.log(p + eps) - torch.log(1 - p + eps) +
                     torch.log(unif_noise + eps) -
                     torch.log(1 - unif_noise + eps))
        drop_prob = torch.sigmoid(drop_prob / temp)
        random_tensor = 1 - drop_prob
        retain_prob = 1 - p
        x = torch.mul(x, random_tensor)
        x /= retain_prob
        return x


class ConcreteDenseMixture(nn.Module):

    def __init__(self, X_dim, Y_dim, nb_features, weight_regularizer,
                 dropout_regularizer, verbose=True):
        super(ConcreteDenseMixture, self).__init__()
        self.verbose = verbose
        self.rank = 2
        self.linear1 = nn.Linear(X_dim, nb_features)
        self.linear2 = nn.Linear(nb_features, nb_features)
        self.linear3 = nn.Linear(nb_features, nb_features)
        self.linear4_mu = nn.Linear(nb_features, Y_dim - 1)
        self.linear4_logvar = nn.Linear(nb_features, Y_dim - 1)
        self.linear4_F = nn.Linear(nb_features, (Y_dim - 1) * self.rank)
        self.linear4_mu2 = nn.Linear(nb_features, Y_dim - 1)
        self.linear4_logvar2 = nn.Linear(nb_features, Y_dim - 1)
        self.linear4_F2 = nn.Linear(nb_features, (Y_dim - 1) * self.rank)
        self.linear4_alpha = nn.Linear(nb_features, 1)
        self.linear4_mu_classifier = nn.Linear(nb_features, 1)
        self.linear4_logvar_classifier = nn.Linear(nb_features, 1)
        self.conc_drop1 = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop2 = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop3 = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_mu = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_logvar = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_F = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_mu2 = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_logvar2 = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_F2 = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_alpha = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_mu_classifier = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.conc_drop_logvar_classifier = ConcreteDropout(
            weight_regularizer=weight_regularizer,
            dropout_regularizer=dropout_regularizer)
        self.tanh = nn.Tanh()

    def forward(self, x):
        regularization = torch.empty(12, device=x.device)
        x1, regularization[0] = self.conc_drop1(
            x, nn.Sequential(self.linear1, self.tanh))
        x2, regularization[1] = self.conc_drop2(
            x1, nn.Sequential(self.linear2, self.tanh))
        x3, regularization[2] = self.conc_drop3(
            x2, nn.Sequential(self.linear3, self.tanh))
        mean, regularization[3] = self.conc_drop_mu(x3, self.linear4_mu)
        logvar, regularization[4] = self.conc_drop_logvar(x3,
            self.linear4_logvar)
        F, regularization[5] = self.conc_drop_F(x3, self.linear4_F)
        mean2, regularization[6] = self.conc_drop_mu2(x3, self.linear4_mu2)
        logvar2, regularization[7] = self.conc_drop_logvar2(x3,
            self.linear4_logvar2)
        F2, regularization[8] = self.conc_drop_F2(x3, self.linear4_F2)
        alpha, regularization[9] = self.conc_drop_alpha(x3, self.linear4_alpha)
        mean_classifier, regularization[10] = self.conc_drop_mu_classifier(
            x3, self.linear4_mu_classifier)
        logvar_classifier, regularization[11] = self.conc_drop_logvar_classifier(
            x3, self.linear4_logvar_classifier)
        return (mean, logvar, F, mean2, logvar2, F2, alpha, mean_classifier,
            logvar_classifier, regularization.sum())


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'X_dim': 4, 'Y_dim': 4, 'nb_features': 4,
        'weight_regularizer': 4, 'dropout_regularizer': 0.5}]
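# A minimal usage sketch of the eager-mode model above -- an illustration for
# this dump, not part of the original artifact. The shapes follow get_inputs()
# and get_init_inputs(), and a CPU tensor suffices for a smoke test; the helper
# name `smoke_test` is ours.
def smoke_test():
    model = ConcreteDenseMixture(X_dim=4, Y_dim=4, nb_features=4,
                                 weight_regularizer=4, dropout_regularizer=0.5)
    outputs = model(torch.rand(4, 4, 4, 4))
    mean, reg = outputs[0], outputs[-1]
    # linear4_mu maps the last dim 4 -> Y_dim - 1 = 3, so mean is (4, 4, 4, 3);
    # reg is the summed weight + dropout regularization penalty (0-dim tensor).
    return mean.shape, reg.item()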
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp12 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-07 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = 1.0 tmp8 = tmp7 - tmp3 tmp9 = tmp8 + tmp4 tmp10 = tl_math.log(tmp9) tmp11 = tmp6 - tmp10 tmp13 = tmp12 + tmp4 tmp14 = tl_math.log(tmp13) tmp15 = tmp11 + tmp14 tmp16 = tmp7 - tmp12 tmp17 = tmp16 + tmp4 tmp18 = tl_math.log(tmp17) tmp19 = tmp15 - tmp18 tmp20 = 10.0 tmp21 = tmp19 * tmp20 tmp22 = tl.sigmoid(tmp21) tmp23 = tmp7 - tmp22 tmp24 = tmp0 * tmp23 tmp25 = tmp24 / tmp8 tl.store(out_ptr0 + x0, tmp25, xmask) @triton.jit def triton_per_fused_pow_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp11 = tl.load(in_ptr2 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp7 = tl.sigmoid(tmp6) tmp8 = tl_math.log(tmp7) tmp9 = 1.0 tmp10 = tmp9 - tmp7 tmp13 = 0.0 tmp14 = tmp12 + tmp13 tmp15 = tmp14 + tmp4 tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 / tmp10 tmp19 = tmp7 * tmp8 tmp20 = tl_math.log(tmp10) tmp21 = tmp10 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 32.0 tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tmp18 / tmp10 tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) tl.store(out_ptr3 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) @triton.jit def triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp13 = tl.load(in_ptr2 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp4 = tl.sigmoid(tmp3) tmp5 = 1e-07 tmp6 = tmp4 + tmp5 tmp7 = tl_math.log(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp4 tmp10 = tmp9 + tmp5 tmp11 = tl_math.log(tmp10) tmp12 = tmp7 - tmp11 tmp14 = tmp13 + tmp5 tmp15 = tl_math.log(tmp14) tmp16 = tmp12 + tmp15 tmp17 = tmp8 - tmp13 tmp18 = tmp17 + tmp5 tmp19 = tl_math.log(tmp18) tmp20 = tmp16 - tmp19 tmp21 = 10.0 tmp22 = tmp20 * tmp21 tmp23 = tl.sigmoid(tmp22) tmp24 = tmp8 - tmp23 tmp25 = tmp1 * tmp24 tmp26 = tmp25 / tmp9 tl.store(out_ptr0 + x0, tmp26, xmask) @triton.jit def triton_for_fused_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8): pid = tl.program_id(0) XBLOCK: tl.constexpr = 1024 num_xblocks_0 = tl.cdiv(1, XBLOCK) num_xblocks_1 = num_xblocks_0 + tl.cdiv(1, XBLOCK) num_xblocks_2 = num_xblocks_1 + tl.cdiv(1, XBLOCK) num_xblocks_3 = num_xblocks_2 + tl.cdiv(1, XBLOCK) num_xblocks_4 = num_xblocks_3 + tl.cdiv(1, XBLOCK) num_xblocks_5 = num_xblocks_4 + tl.cdiv(1, XBLOCK) num_xblocks_6 = num_xblocks_5 + tl.cdiv(1, XBLOCK) num_xblocks_7 = num_xblocks_6 + tl.cdiv(1, XBLOCK) num_xblocks_8 = num_xblocks_7 + tl.cdiv(1, XBLOCK) if pid < num_xblocks_0: pid_offset = pid xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp1, None) elif pid < num_xblocks_1: pid_offset = pid - num_xblocks_0 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tl.store(out_ptr1 + tl.full([XBLOCK], 0, tl.int32), tmp3, None) elif pid < num_xblocks_2: pid_offset = pid - num_xblocks_1 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tl.store(out_ptr2 + tl.full([XBLOCK], 0, tl.int32), tmp5, None) elif pid < num_xblocks_3: pid_offset = pid - num_xblocks_2 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp6 = tl.load(in_ptr3 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tl.store(out_ptr3 + tl.full([XBLOCK], 0, tl.int32), tmp7, None) elif pid < num_xblocks_4: pid_offset = pid - num_xblocks_3 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp8 = tl.load(in_ptr4 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tl.store(out_ptr4 + tl.full([XBLOCK], 0, tl.int32), tmp9, None) elif pid < num_xblocks_5: pid_offset = pid - num_xblocks_4 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp10 = tl.load(in_ptr5 + 0) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tl.store(out_ptr5 + tl.full([XBLOCK], 0, tl.int32), tmp11, None) elif pid < num_xblocks_6: pid_offset = pid - num_xblocks_5 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp12 = tl.load(in_ptr6 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tl.store(out_ptr6 + tl.full([XBLOCK], 0, tl.int32), tmp13, None) elif pid < num_xblocks_7: pid_offset = pid - num_xblocks_6 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] 
tl.full([XBLOCK], True, tl.int1) tmp14 = tl.load(in_ptr7 + 0) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tl.store(out_ptr7 + tl.full([XBLOCK], 0, tl.int32), tmp15, None) elif pid < num_xblocks_8: pid_offset = pid - num_xblocks_7 xoffset = pid_offset * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp16 = tl.load(in_ptr8 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tl.store(out_ptr8 + tl.full([XBLOCK], 0, tl.int32), tmp17, None) else: pass @triton.jit def triton_poi_fused_sigmoid_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 9 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_per_fused_add_div_log_mul_pow_rsub_sum_6(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + 8) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp12 = tl.load(in_ptr2 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp7 = tl_math.log(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp6 tmp10 = 0.0 tmp11 = tmp4 + tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 / tmp9 tmp19 = tmp6 * tmp7 tmp20 = tl_math.log(tmp9) tmp21 = tmp9 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 32.0 tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tmp18 / tmp9 tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) tl.store(out_ptr3 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) @triton.jit def triton_per_fused_pow_sum_7(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 3 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_pow_sum_8(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_add_div_mul_pow_rsub_sum_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 6 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = 
tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1]) tmp13 = tl.load(in_ptr2 + 2) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp8 = 0.0 tmp9 = tmp7 + tmp8 tmp10 = tmp9 + tmp5 tmp11 = 4.0 tmp12 = tmp10 * tmp11 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp12 / tmp16 tmp18 = tmp17 / tmp16 tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_add_div_mul_pow_rsub_sum_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 6 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1]) tmp13 = tl.load(in_ptr2 + 5) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp8 = 0.0 tmp9 = tmp7 + tmp8 tmp10 = tmp9 + tmp5 tmp11 = 4.0 tmp12 = tmp10 * tmp11 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp12 / tmp16 tmp18 = tmp17 / tmp16 tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_pow_sum_11(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_add_div_mul_pow_rsub_sum_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 12 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp6 = tl.load(in_ptr1 + r0, rmask, other=0.0) tmp12 = tl.load(in_ptr2 + r0, rmask, other=0.0) tmp18 = tl.load(in_ptr3 + r0, rmask, other=0.0) tmp29 = tl.load(in_ptr4 + 0) tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK]) tmp34 = tl.load(in_ptr5 + 0) tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp49 = tl.load(in_ptr6 + 0) tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp53 = tl.load(in_ptr7 + 0) tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK]) tmp57 = tl.load(in_ptr8 + 0) tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK]) tmp66 = tl.load(in_ptr9 + 0) tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = tl.load(in_ptr10 + 0) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK]) tmp73 = tl.load(in_ptr5 + 
2) tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp87 = tl.load(in_ptr11 + 0) tmp88 = tl.broadcast_to(tmp87, [XBLOCK, RBLOCK]) tmp91 = tl.load(in_ptr5 + 1) tmp92 = tl.broadcast_to(tmp91, [XBLOCK, RBLOCK]) tmp107 = tl.load(in_ptr12 + 0) tmp108 = tl.broadcast_to(tmp107, [XBLOCK, RBLOCK]) tmp111 = tl.load(in_ptr5 + 4) tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK]) tmp125 = tl.load(in_ptr13 + 0) tmp126 = tl.broadcast_to(tmp125, [XBLOCK, RBLOCK]) tmp129 = tl.load(in_ptr5 + 3) tmp130 = tl.broadcast_to(tmp129, [XBLOCK, RBLOCK]) tmp144 = tl.load(in_ptr14 + 0) tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = tl.load(in_ptr15 + 0) tmp148 = tl.broadcast_to(tmp147, [XBLOCK, RBLOCK]) tmp152 = tl.load(in_ptr5 + 6) tmp153 = tl.broadcast_to(tmp152, [XBLOCK, RBLOCK]) tmp165 = tl.load(in_ptr16 + 0) tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK]) tmp168 = tl.load(in_ptr17 + 0) tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK]) tmp172 = tl.load(in_ptr5 + 5) tmp173 = tl.broadcast_to(tmp172, [XBLOCK, RBLOCK]) tmp187 = tl.load(in_ptr18 + 0) tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp191 = tl.load(in_ptr19 + 0) tmp192 = tl.broadcast_to(tmp191, [XBLOCK, RBLOCK]) tmp194 = tl.load(in_ptr20 + 0) tmp195 = tl.broadcast_to(tmp194, [XBLOCK, RBLOCK]) tmp199 = tl.load(in_ptr5 + 7) tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK]) tmp216 = tl.broadcast_to(tmp107, [XBLOCK, 1]) tmp219 = tl.broadcast_to(tmp111, [XBLOCK, 1]) tmp223 = tl.broadcast_to(tmp125, [XBLOCK, 1]) tmp226 = tl.broadcast_to(tmp129, [XBLOCK, 1]) tmp230 = tl.broadcast_to(tmp87, [XBLOCK, 1]) tmp233 = tl.broadcast_to(tmp91, [XBLOCK, 1]) tmp237 = tl.broadcast_to(tmp29, [XBLOCK, 1]) tmp240 = tl.broadcast_to(tmp34, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp7 = tmp6 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(rmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(rmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = r0 tmp25 = tl.full([1, 1], 3, tl.int32) tmp26 = tmp24 == tmp25 tmp27 = 0.0 tmp28 = tmp23 + tmp27 tmp31 = tmp28 + tmp30 tmp32 = 4.0 tmp33 = tmp31 * tmp32 tmp36 = 1.0 tmp37 = tmp36 - tmp35 tmp38 = tmp33 / tmp37 tmp39 = tl_math.log(tmp35) tmp40 = tmp35 * tmp39 tmp41 = tl_math.log(tmp37) tmp42 = tmp37 * tmp41 tmp43 = tmp40 + tmp42 tmp44 = 32.0 tmp45 = tmp43 * tmp44 tmp46 = tmp38 + tmp45 tmp47 = tl.full([1, 1], 2, tl.int32) tmp48 = tmp24 == tmp47 tmp51 = tl.full([1, 1], 1, tl.int32) tmp52 = tmp24 == tmp51 tmp55 = tl.full([1, 1], 0, tl.int32) tmp56 = tmp24 == tmp55 tmp59 = float('nan') tmp60 = tl.where(tmp56, tmp58, tmp59) tmp61 = tl.where(tmp52, tmp54, tmp60) tmp62 = tl.where(tmp48, tmp50, tmp61) tmp63 = tl.where(tmp26, tmp46, tmp62) tmp64 = tl.full([1, 1], 5, tl.int32) tmp65 = tmp24 == tmp64 tmp68 = tmp67 + tmp27 tmp71 = tmp68 + tmp70 tmp72 = tmp71 * tmp32 tmp75 = tmp36 - tmp74 tmp76 = tmp72 / tmp75 tmp77 = tl_math.log(tmp74) tmp78 = tmp74 * tmp77 tmp79 = tl_math.log(tmp75) tmp80 = tmp75 * tmp79 tmp81 = tmp78 + tmp80 tmp82 = tmp81 * tmp44 tmp83 = tmp76 + tmp82 tmp84 = tl.full([1, 1], 4, tl.int32) tmp85 = tmp24 == tmp84 tmp86 = tmp11 + tmp27 tmp89 = tmp86 + tmp88 tmp90 = tmp89 * tmp32 tmp93 = tmp36 - tmp92 tmp94 = tmp90 / tmp93 tmp95 = tl_math.log(tmp92) tmp96 = tmp92 * 
tmp95 tmp97 = tl_math.log(tmp93) tmp98 = tmp93 * tmp97 tmp99 = tmp96 + tmp98 tmp100 = tmp99 * tmp44 tmp101 = tmp94 + tmp100 tmp102 = tl.where(tmp85, tmp101, tmp63) tmp103 = tl.where(tmp65, tmp83, tmp102) tmp104 = tl.full([1, 1], 7, tl.int32) tmp105 = tmp24 == tmp104 tmp106 = tmp17 + tmp27 tmp109 = tmp106 + tmp108 tmp110 = tmp109 * tmp32 tmp113 = tmp36 - tmp112 tmp114 = tmp110 / tmp113 tmp115 = tl_math.log(tmp112) tmp116 = tmp112 * tmp115 tmp117 = tl_math.log(tmp113) tmp118 = tmp113 * tmp117 tmp119 = tmp116 + tmp118 tmp120 = tmp119 * tmp44 tmp121 = tmp114 + tmp120 tmp122 = tl.full([1, 1], 6, tl.int32) tmp123 = tmp24 == tmp122 tmp124 = tmp5 + tmp27 tmp127 = tmp124 + tmp126 tmp128 = tmp127 * tmp32 tmp131 = tmp36 - tmp130 tmp132 = tmp128 / tmp131 tmp133 = tl_math.log(tmp130) tmp134 = tmp130 * tmp133 tmp135 = tl_math.log(tmp131) tmp136 = tmp131 * tmp135 tmp137 = tmp134 + tmp136 tmp138 = tmp137 * tmp44 tmp139 = tmp132 + tmp138 tmp140 = tl.where(tmp123, tmp139, tmp103) tmp141 = tl.where(tmp105, tmp121, tmp140) tmp142 = tl.full([1, 1], 9, tl.int32) tmp143 = tmp24 == tmp142 tmp146 = tmp145 + tmp27 tmp149 = tmp148 * tmp148 tmp150 = tmp146 + tmp149 tmp151 = tmp150 * tmp32 tmp154 = tmp36 - tmp153 tmp155 = tmp151 / tmp154 tmp156 = tl_math.log(tmp153) tmp157 = tmp153 * tmp156 tmp158 = tl_math.log(tmp154) tmp159 = tmp154 * tmp158 tmp160 = tmp157 + tmp159 tmp161 = tmp160 * tmp44 tmp162 = tmp155 + tmp161 tmp163 = tl.full([1, 1], 8, tl.int32) tmp164 = tmp24 == tmp163 tmp167 = tmp166 + tmp27 tmp170 = tmp167 + tmp169 tmp171 = tmp170 * tmp32 tmp174 = tmp36 - tmp173 tmp175 = tmp171 / tmp174 tmp176 = tl_math.log(tmp173) tmp177 = tmp173 * tmp176 tmp178 = tl_math.log(tmp174) tmp179 = tmp174 * tmp178 tmp180 = tmp177 + tmp179 tmp181 = tmp180 * tmp44 tmp182 = tmp175 + tmp181 tmp183 = tl.where(tmp164, tmp182, tmp141) tmp184 = tl.where(tmp143, tmp162, tmp183) tmp185 = tl.full([1, 1], 11, tl.int32) tmp186 = tmp24 == tmp185 tmp189 = tl.full([1, 1], 10, tl.int32) tmp190 = tmp24 == tmp189 tmp193 = tmp192 + tmp27 tmp196 = tmp195 * tmp195 tmp197 = tmp193 + tmp196 tmp198 = tmp197 * tmp32 tmp201 = tmp36 - tmp200 tmp202 = tmp198 / tmp201 tmp203 = tl_math.log(tmp200) tmp204 = tmp200 * tmp203 tmp205 = tl_math.log(tmp201) tmp206 = tmp201 * tmp205 tmp207 = tmp204 + tmp206 tmp208 = tmp207 * tmp44 tmp209 = tmp202 + tmp208 tmp210 = tl.where(tmp190, tmp209, tmp184) tmp211 = tl.where(tmp186, tmp188, tmp210) tmp212 = tl.broadcast_to(tmp211, [XBLOCK, RBLOCK]) tmp214 = tl.where(rmask, tmp212, 0) tmp215 = tl.sum(tmp214, 1)[:, None] tmp217 = tmp106 + tmp216 tmp218 = tmp217 * tmp32 tmp220 = tmp36 - tmp219 tmp221 = tmp218 / tmp220 tmp222 = tmp221 / tmp220 tmp224 = tmp124 + tmp223 tmp225 = tmp224 * tmp32 tmp227 = tmp36 - tmp226 tmp228 = tmp225 / tmp227 tmp229 = tmp228 / tmp227 tmp231 = tmp86 + tmp230 tmp232 = tmp231 * tmp32 tmp234 = tmp36 - tmp233 tmp235 = tmp232 / tmp234 tmp236 = tmp235 / tmp234 tmp238 = tmp28 + tmp237 tmp239 = tmp238 * tmp32 tmp241 = tmp36 - tmp240 tmp242 = tmp239 / tmp241 tmp243 = tmp242 / tmp241 tl.store(out_ptr5 + tl.full([XBLOCK, 1], 0, tl.int32), tmp222, None) tl.store(out_ptr6 + tl.full([XBLOCK, 1], 0, tl.int32), tmp229, None) tl.store(out_ptr7 + tl.full([XBLOCK, 1], 0, tl.int32), tmp236, None) tl.store(out_ptr8 + tl.full([XBLOCK, 1], 0, tl.int32), tmp243, None) tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp215, None) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 1) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_18(in_ptr0, in_ptr1, 
out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 5) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 7) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_add_div_log_rsub_sub_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 8) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp6 + tmp2 tmp8 = tl_math.log(tmp7) tmp9 = tmp4 - tmp8 tmp11 = tmp10 + tmp2 tmp12 = tl_math.log(tmp11) tmp13 = tmp9 + tmp12 tmp14 = tmp5 - tmp10 tmp15 = tmp14 + tmp2 tmp16 = tl_math.log(tmp15) tmp17 = tmp13 - tmp16 tmp18 = 10.0 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x0, tmp19, xmask) @triton.jit def triton_poi_fused_sigmoid_22(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_div_mul_rsub_tanh_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1792 + x0), xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + 7) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp10 = tl.load(in_ptr0 + (1536 + x0), xmask) tmp13 = tl.load(in_ptr2 + 6) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (1280 + x0), xmask) tmp20 = tl.load(in_ptr2 + 5) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp24 = tl.load(in_ptr0 + (1024 + x0), xmask) tmp27 = tl.load(in_ptr2 + 4) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp31 = tl.load(in_ptr0 + (768 + x0), xmask) tmp34 = tl.load(in_ptr2 + 3) tmp35 = tl.broadcast_to(tmp34, [XBLOCK]) tmp38 = tl.load(in_ptr0 + (512 + x0), xmask) tmp41 = tl.load(in_ptr2 + 2) tmp42 = tl.broadcast_to(tmp41, [XBLOCK]) tmp45 = tl.load(in_ptr0 + (256 + x0), xmask) tmp48 = tl.load(in_ptr2 + 1) tmp49 = tl.broadcast_to(tmp48, [XBLOCK]) tmp52 = tl.load(in_ptr0 + x0, xmask) tmp55 = tl.load(in_ptr2 + 0) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp59 = tl.load(in_ptr0 + (2048 + x0), xmask) tmp62 = tl.load(in_ptr2 + 8) tmp63 = tl.broadcast_to(tmp62, [XBLOCK]) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = libdevice.tanh(tmp3) tmp5 = tmp4 * tmp2 tmp8 = tmp1 - tmp7 tmp9 = tmp5 / tmp8 tmp11 = tmp1 - tmp10 tmp12 = tmp4 * tmp11 tmp15 = tmp1 - tmp14 tmp16 = tmp12 / tmp15 tmp18 = tmp1 - tmp17 tmp19 = tmp4 * tmp18 tmp22 = tmp1 - tmp21 tmp23 = tmp19 / tmp22 tmp25 = tmp1 - tmp24 tmp26 = tmp4 * tmp25 tmp29 = tmp1 - tmp28 tmp30 = tmp26 / tmp29 tmp32 = tmp1 - tmp31 tmp33 = tmp4 * tmp32 tmp36 = tmp1 - tmp35 tmp37 = tmp33 / tmp36 tmp39 = tmp1 - tmp38 tmp40 = tmp4 * tmp39 tmp43 = tmp1 - tmp42 tmp44 = tmp40 / tmp43 tmp46 = tmp1 - tmp45 tmp47 = tmp4 * tmp46 tmp50 = tmp1 - tmp49 tmp51 = tmp47 / tmp50 tmp53 = tmp1 - tmp52 tmp54 = tmp4 * tmp53 tmp57 = tmp1 - tmp56 tmp58 = tmp54 / tmp57 tmp60 = tmp1 - tmp59 tmp61 = tmp4 * tmp60 tmp64 = tmp1 - tmp63 tmp65 = tmp61 / tmp64 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp9, xmask) tl.store(out_ptr2 + x0, tmp11, xmask) tl.store(out_ptr3 + x0, tmp16, xmask) tl.store(out_ptr4 + x0, tmp18, xmask) tl.store(out_ptr5 + x0, tmp23, xmask) tl.store(out_ptr6 + x0, tmp25, xmask) tl.store(out_ptr7 + x0, tmp30, xmask) tl.store(out_ptr8 + x0, tmp32, xmask) tl.store(out_ptr9 + x0, tmp37, xmask) tl.store(out_ptr10 + x0, tmp39, xmask) tl.store(out_ptr11 + x0, tmp44, xmask) tl.store(out_ptr12 + x0, tmp46, xmask) tl.store(out_ptr13 + x0, tmp51, xmask) tl.store(out_ptr14 + x0, tmp53, xmask) tl.store(out_ptr15 + x0, tmp58, xmask) tl.store(out_ptr16 + x0, tmp60, xmask) tl.store(out_ptr17 + x0, tmp65, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, 
(1,), (1,)) assert_size_stride(primals_12, (3, 4), (4, 1)) assert_size_stride(primals_13, (3,), (1,)) assert_size_stride(primals_14, (1,), (1,)) assert_size_stride(primals_15, (3, 4), (4, 1)) assert_size_stride(primals_16, (3,), (1,)) assert_size_stride(primals_17, (1,), (1,)) assert_size_stride(primals_18, (6, 4), (4, 1)) assert_size_stride(primals_19, (6,), (1,)) assert_size_stride(primals_20, (1,), (1,)) assert_size_stride(primals_21, (3, 4), (4, 1)) assert_size_stride(primals_22, (3,), (1,)) assert_size_stride(primals_23, (1,), (1,)) assert_size_stride(primals_24, (3, 4), (4, 1)) assert_size_stride(primals_25, (3,), (1,)) assert_size_stride(primals_26, (1,), (1,)) assert_size_stride(primals_27, (6, 4), (4, 1)) assert_size_stride(primals_28, (6,), (1,)) assert_size_stride(primals_29, (1,), (1,)) assert_size_stride(primals_30, (1, 4), (4, 1)) assert_size_stride(primals_31, (1,), (1,)) assert_size_stride(primals_32, (1,), (1,)) assert_size_stride(primals_33, (1, 4), (4, 1)) assert_size_stride(primals_34, (1,), (1,)) assert_size_stride(primals_35, (1,), (1,)) assert_size_stride(primals_36, (1, 4), (4, 1)) assert_size_stride(primals_37, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_0[grid(256)]( primals_1, primals_2, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) buf5 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_1[grid(1)](primals_3, buf5, 1, 16, XBLOCK= 1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((1,), (1,), torch.float32) buf8 = empty_strided_cuda((1,), (1,), torch.float32) buf9 = empty_strided_cuda((1,), (1,), torch.float32) buf131 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2[grid(1)]( primals_4, primals_2, buf5, buf7, buf8, buf9, buf131, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf10 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3[grid(256)]( buf4, primals_5, buf11, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) buf14 = buf5 del buf5 triton_per_fused_pow_sum_1[grid(1)](primals_6, buf14, 1, 16, XBLOCK =1, num_warps=2, num_stages=1) buf16 = empty_strided_cuda((1,), (1,), torch.float32) buf17 = empty_strided_cuda((1,), (1,), torch.float32) buf18 = empty_strided_cuda((1,), (1,), torch.float32) buf130 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2[grid(1)]( primals_7, primals_5, buf14, buf16, buf17, buf18, buf130, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf19 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. 
float32, device=device(type='cuda', index=0), pin_memory=False) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_log_mul_rsub_sigmoid_sub_tanh_3[grid(256)]( buf13, primals_8, buf20, buf21, 256, XBLOCK=128, num_warps=4, num_stages=1) buf22 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf21, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf22) buf23 = buf14 del buf14 triton_per_fused_pow_sum_1[grid(1)](primals_9, buf23, 1, 16, XBLOCK =1, num_warps=2, num_stages=1) buf25 = empty_strided_cuda((1,), (1,), torch.float32) buf26 = empty_strided_cuda((1,), (1,), torch.float32) buf27 = empty_strided_cuda((1,), (1,), torch.float32) buf129 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_log_mul_pow_rsub_sigmoid_sum_2[grid(1)]( primals_10, primals_8, buf23, buf25, buf26, buf27, buf129, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf37 = empty_strided_cuda((9,), (1,), torch.float32) buf28 = reinterpret_tensor(buf37, (1,), (1,), 0) buf29 = reinterpret_tensor(buf37, (1,), (1,), 1) buf30 = reinterpret_tensor(buf37, (1,), (1,), 2) buf31 = reinterpret_tensor(buf37, (1,), (1,), 3) buf32 = reinterpret_tensor(buf37, (1,), (1,), 4) buf33 = reinterpret_tensor(buf37, (1,), (1,), 5) buf34 = reinterpret_tensor(buf37, (1,), (1,), 6) buf35 = reinterpret_tensor(buf37, (1,), (1,), 7) buf36 = reinterpret_tensor(buf37, (1,), (1,), 8) triton_for_fused_4[9, 1, 1](primals_11, primals_14, primals_17, primals_20, primals_23, primals_26, primals_29, primals_32, primals_35, buf28, buf29, buf30, buf31, buf32, buf33, buf34, buf35, buf36, num_warps=8, num_stages=1) del primals_11 del primals_14 del primals_17 del primals_20 del primals_23 del primals_26 del primals_29 del primals_32 del primals_35 buf38 = empty_strided_cuda((9, 1), (1, 1), torch.float32) triton_poi_fused_sigmoid_5[grid(9)](buf37, buf38, 9, XBLOCK=16, num_warps=1, num_stages=1) del buf28 del buf29 del buf30 del buf31 del buf32 del buf33 del buf34 del buf35 del buf36 del buf37 buf39 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. 
float32, device=device(type='cuda', index=0), pin_memory=False) buf40 = buf39 del buf39 buf117 = reinterpret_tensor(buf23, (1,), (1,), 0) del buf23 buf118 = empty_strided_cuda((1,), (1,), torch.float32) buf119 = empty_strided_cuda((1,), (1,), torch.float32) buf122 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_log_mul_pow_rsub_sum_6[grid(1)](primals_36, buf38, primals_37, buf117, buf118, buf119, buf122, 1, 4, XBLOCK =1, num_warps=2, num_stages=1) buf42 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_7[grid(1)](primals_13, buf42, 1, 3, XBLOCK =1, num_warps=2, num_stages=1) buf47 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_7[grid(1)](primals_16, buf47, 1, 3, XBLOCK =1, num_warps=2, num_stages=1) buf50 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_8[grid(1)](primals_18, buf50, 1, 24, XBLOCK=1, num_warps=2, num_stages=1) buf51 = empty_strided_cuda((), (), torch.float32) buf126 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_mul_pow_rsub_sum_9[grid(1)](primals_19, buf50, buf38, buf51, buf126, 1, 6, XBLOCK=1, num_warps=2, num_stages=1) buf56 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_7[grid(1)](primals_22, buf56, 1, 3, XBLOCK =1, num_warps=2, num_stages=1) buf60 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_7[grid(1)](primals_25, buf60, 1, 3, XBLOCK =1, num_warps=2, num_stages=1) buf64 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_8[grid(1)](primals_27, buf64, 1, 24, XBLOCK=1, num_warps=2, num_stages=1) buf65 = empty_strided_cuda((), (), torch.float32) buf123 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_mul_pow_rsub_sum_10[grid(1)](primals_28, buf64, buf38, buf65, buf123, 1, 6, XBLOCK=1, num_warps=2, num_stages=1) buf68 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_11[grid(1)](primals_30, buf68, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf72 = empty_strided_cuda((), (), torch.float32) triton_per_fused_pow_sum_11[grid(1)](primals_33, buf72, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf121 = empty_strided_cuda((), (), torch.float32) buf124 = empty_strided_cuda((1,), (1,), torch.float32) buf125 = empty_strided_cuda((1,), (1,), torch.float32) buf127 = empty_strided_cuda((1,), (1,), torch.float32) buf128 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_add_div_mul_pow_rsub_sum_12[grid(1)](primals_21, primals_15, primals_24, primals_12, buf42, buf38, buf27, buf18, buf9, buf50, buf51, buf47, buf60, buf56, buf68, primals_31, buf64, buf65, buf119, buf72, primals_34, buf121, buf124, buf125, buf127, buf128, 1, 12, XBLOCK=1, num_warps=2, num_stages=1) del buf119 del buf18 del buf27 del buf42 del buf47 del buf50 del buf51 del buf56 del buf60 del buf64 del buf65 del buf68 del buf72 del buf9 buf44 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf45 = buf44 del buf44 buf48 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf49 = buf48 del buf48 buf53 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf54 = buf53 del buf53 buf57 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf58 = buf57 del buf57 buf62 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. 
float32, device=device(type='cuda', index=0), pin_memory=False) buf63 = buf62 del buf62 buf66 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf67 = buf66 del buf66 buf70 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf71 = buf70 del buf70 buf73 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf74 = buf73 del buf73 buf84 = empty_strided_cuda((36, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf75 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 0) triton_poi_fused_add_div_log_rsub_sub_13[grid(256)](buf38, buf40, buf75, 256, XBLOCK=128, num_warps=4, num_stages=1) buf76 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 256) triton_poi_fused_add_div_log_rsub_sub_14[grid(256)](buf38, buf45, buf76, 256, XBLOCK=256, num_warps=4, num_stages=1) buf77 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 512) triton_poi_fused_add_div_log_rsub_sub_15[grid(256)](buf38, buf49, buf77, 256, XBLOCK=256, num_warps=4, num_stages=1) buf78 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 768) triton_poi_fused_add_div_log_rsub_sub_16[grid(256)](buf38, buf54, buf78, 256, XBLOCK=128, num_warps=4, num_stages=1) buf79 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1024) triton_poi_fused_add_div_log_rsub_sub_17[grid(256)](buf38, buf58, buf79, 256, XBLOCK=128, num_warps=4, num_stages=1) buf80 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1280) triton_poi_fused_add_div_log_rsub_sub_18[grid(256)](buf38, buf63, buf80, 256, XBLOCK=256, num_warps=4, num_stages=1) buf81 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1536) triton_poi_fused_add_div_log_rsub_sub_19[grid(256)](buf38, buf67, buf81, 256, XBLOCK=128, num_warps=4, num_stages=1) buf82 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 1792) triton_poi_fused_add_div_log_rsub_sub_20[grid(256)](buf38, buf71, buf82, 256, XBLOCK=128, num_warps=4, num_stages=1) buf83 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 2048) triton_poi_fused_add_div_log_rsub_sub_21[grid(256)](buf38, buf74, buf83, 256, XBLOCK=256, num_warps=4, num_stages=1) buf85 = empty_strided_cuda((9, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_sigmoid_22[grid(2304)](buf84, buf85, 2304, XBLOCK= 128, num_warps=4, num_stages=1) del buf75 del buf76 del buf77 del buf78 del buf79 del buf80 del buf81 del buf82 del buf83 del buf84 buf86 = buf74 del buf74 buf87 = buf71 del buf71 buf90 = buf67 del buf67 buf91 = buf63 del buf63 buf94 = buf58 del buf58 buf95 = buf54 del buf54 buf97 = buf49 del buf49 buf98 = buf45 del buf45 buf100 = buf40 del buf40 buf101 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf103 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf104 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf106 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf107 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf109 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf110 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf112 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) buf113 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused_div_mul_rsub_tanh_23[grid(256)](buf85, buf22, buf38, buf86, buf87, buf90, buf91, 
buf94, buf95, buf97, buf98, buf100, buf101, buf103, buf104, buf106, buf107, buf109, buf110, buf112, buf113, 256, XBLOCK=256, num_warps=4, num_stages=1) buf89 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_34, reinterpret_tensor(buf87, (64, 4), (4, 1), 0), reinterpret_tensor(primals_33, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf89) buf93 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_31, reinterpret_tensor(buf91, (64, 4), (4, 1), 0), reinterpret_tensor(primals_30, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf93) buf96 = empty_strided_cuda((64, 6), (6, 1), torch.float32) extern_kernels.addmm(primals_28, reinterpret_tensor(buf95, (64, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 6), (1, 4), 0), alpha=1, beta=1, out=buf96) buf99 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_25, reinterpret_tensor(buf98, (64, 4), (4, 1), 0), reinterpret_tensor(primals_24, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf99) buf102 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_22, reinterpret_tensor(buf101, (64, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf102) buf105 = empty_strided_cuda((64, 6), (6, 1), torch.float32) extern_kernels.addmm(primals_19, reinterpret_tensor(buf104, (64, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 6), (1, 4), 0), alpha=1, beta=1, out=buf105) buf108 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_16, reinterpret_tensor(buf107, (64, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf108) buf111 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf110, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf111) buf115 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_37, reinterpret_tensor(buf113, (64, 4), (4, 1), 0), reinterpret_tensor(primals_36, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf115) return (reinterpret_tensor(buf111, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf108, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf105, (4, 4, 4, 6), (96, 24, 6, 1), 0), reinterpret_tensor(buf102, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf99, (4, 4, 4, 3), (48, 12, 3, 1), 0), reinterpret_tensor(buf96, (4, 4, 4, 6), (96, 24, 6, 1), 0), reinterpret_tensor(buf93, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf89, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf115, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf121, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_12, primals_13, primals_15, primals_16, primals_18, primals_19, primals_21, primals_22, primals_24, primals_25, primals_27, primals_28, primals_30, primals_31, primals_33, primals_34, primals_36, primals_37, buf2, buf3, buf4, buf7, buf8, buf11, buf12, buf13, buf16, buf17, buf20, buf21, buf22, buf25, buf26, buf38, reinterpret_tensor(buf38, (1,), (1,), 0), reinterpret_tensor(buf38, (1,), (1,), 1), reinterpret_tensor(buf38, (1,), (1,), 2), reinterpret_tensor(buf38, (1,), (1,), 3), reinterpret_tensor(buf38, (1,), (1,), 4), reinterpret_tensor(buf38, (1,), (1,), 5), reinterpret_tensor(buf38, (1,), (1,), 6), reinterpret_tensor(buf38, (1,), (1,), 7), reinterpret_tensor(buf38, (1,), (1,), 8), buf85, buf86, buf87, buf90, buf91, buf94, buf95, buf97, 
buf98, buf100, buf101, buf103, buf104, buf106, buf107, buf109, buf110, buf112, buf113, buf117, buf118, buf122, buf123, buf124, buf125, buf126, buf127, buf128, buf129, buf130, buf131) class ConcreteDropout(nn.Module): def __init__(self, weight_regularizer=1e-06, dropout_regularizer=1e-05, init_min=0.1, init_max=0.1): super(ConcreteDropout, self).__init__() self.weight_regularizer = weight_regularizer self.dropout_regularizer = dropout_regularizer init_min = np.log(init_min) - np.log(1.0 - init_min) init_max = np.log(init_max) - np.log(1.0 - init_max) self.p_logit = nn.Parameter(torch.empty(1).uniform_(init_min, init_max) ) def forward(self, x, layer): p = torch.sigmoid(self.p_logit) out = layer(self._concrete_dropout(x, p)) sum_of_square = 0 for param in layer.parameters(): sum_of_square += torch.sum(torch.pow(param, 2)) weights_regularizer = self.weight_regularizer * sum_of_square / (1 - p) dropout_regularizer = p * torch.log(p) dropout_regularizer += (1.0 - p) * torch.log(1.0 - p) input_dimensionality = x[0].numel() dropout_regularizer *= self.dropout_regularizer * input_dimensionality regularization = weights_regularizer + dropout_regularizer return out, regularization def _concrete_dropout(self, x, p): eps = 1e-07 temp = 0.1 unif_noise = torch.rand_like(x) drop_prob = torch.log(p + eps) - torch.log(1 - p + eps) + torch.log( unif_noise + eps) - torch.log(1 - unif_noise + eps) drop_prob = torch.sigmoid(drop_prob / temp) random_tensor = 1 - drop_prob retain_prob = 1 - p x = torch.mul(x, random_tensor) x /= retain_prob return x class ConcreteDenseMixtureNew(nn.Module): def __init__(self, X_dim, Y_dim, nb_features, weight_regularizer, dropout_regularizer, verbose=True): super(ConcreteDenseMixtureNew, self).__init__() self.verbose = verbose self.rank = 2 self.linear1 = nn.Linear(X_dim, nb_features) self.linear2 = nn.Linear(nb_features, nb_features) self.linear3 = nn.Linear(nb_features, nb_features) self.linear4_mu = nn.Linear(nb_features, Y_dim - 1) self.linear4_logvar = nn.Linear(nb_features, Y_dim - 1) self.linear4_F = nn.Linear(nb_features, (Y_dim - 1) * self.rank) self.linear4_mu2 = nn.Linear(nb_features, Y_dim - 1) self.linear4_logvar2 = nn.Linear(nb_features, Y_dim - 1) self.linear4_F2 = nn.Linear(nb_features, (Y_dim - 1) * self.rank) self.linear4_alpha = nn.Linear(nb_features, 1) self.linear4_mu_classifier = nn.Linear(nb_features, 1) self.linear4_logvar_classifier = nn.Linear(nb_features, 1) self.conc_drop1 = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop2 = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop3 = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_mu = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_logvar = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_F = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_mu2 = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_logvar2 = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_F2 = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_alpha = ConcreteDropout(weight_regularizer= 
weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_mu_classifier = ConcreteDropout(weight_regularizer= weight_regularizer, dropout_regularizer=dropout_regularizer) self.conc_drop_logvar_classifier = ConcreteDropout(weight_regularizer =weight_regularizer, dropout_regularizer=dropout_regularizer) self.tanh = nn.Tanh() def forward(self, input_0): primals_3 = self.linear1.weight primals_4 = self.linear1.bias primals_6 = self.linear2.weight primals_7 = self.linear2.bias primals_9 = self.linear3.weight primals_10 = self.linear3.bias primals_12 = self.linear4_mu.weight primals_13 = self.linear4_mu.bias primals_15 = self.linear4_logvar.weight primals_16 = self.linear4_logvar.bias primals_18 = self.linear4_F.weight primals_19 = self.linear4_F.bias primals_21 = self.linear4_mu2.weight primals_22 = self.linear4_mu2.bias primals_24 = self.linear4_logvar2.weight primals_25 = self.linear4_logvar2.bias primals_27 = self.linear4_F2.weight primals_28 = self.linear4_F2.bias primals_30 = self.linear4_alpha.weight primals_2 = self.linear4_alpha.bias primals_33 = self.linear4_mu_classifier.weight primals_5 = self.linear4_mu_classifier.bias primals_36 = self.linear4_logvar_classifier.weight primals_8 = self.linear4_logvar_classifier.bias primals_11 = self.conc_drop1.p_logit primals_14 = self.conc_drop2.p_logit primals_17 = self.conc_drop3.p_logit primals_20 = self.conc_drop_mu.p_logit primals_23 = self.conc_drop_logvar.p_logit primals_26 = self.conc_drop_F.p_logit primals_29 = self.conc_drop_mu2.p_logit primals_31 = self.conc_drop_logvar2.p_logit primals_32 = self.conc_drop_F2.p_logit primals_34 = self.conc_drop_alpha.p_logit primals_35 = self.conc_drop_mu_classifier.p_logit primals_37 = self.conc_drop_logvar_classifier.p_logit primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37]) return output[0], output[1], output[2], output[3], output[4], output[5 ], output[6], output[7], output[8], output[9]
jiwoncpark/fast-forward
ConcreteDenseMixture
false
10448
[ "MIT" ]
0
640a521241a8756be2a0d42282e88d56a2290fca
https://github.com/jiwoncpark/fast-forward/tree/640a521241a8756be2a0d42282e88d56a2290fca
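A minimal usage sketch for the compiled ConcreteDenseMixtureNew defined above. The sizes are inferred from the stride guards in call() (X_dim=4, Y_dim=4, nb_features=4, a (4, 4, 4, 4) input); the regularizer values reuse the ConcreteDropout defaults, and the class definitions above plus a CUDA device are assumed.

import torch

model = ConcreteDenseMixtureNew(X_dim=4, Y_dim=4, nb_features=4,
                                weight_regularizer=1e-06,
                                dropout_regularizer=1e-05).cuda()
outputs = model(torch.rand(4, 4, 4, 4, device='cuda'))
# 10-tuple: mu/logvar/F for both mixture components, the alpha and
# classifier heads, and the summed concrete-dropout regularization term.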
FirstBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ro/crodqpemvzed7dwj6ikboerwmi47yfap7jg5xgrgodxuptzllrip.py # Topologically Sorted Source Nodes: [mul, leaky_relu], Original ATen: [aten.mul, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # leaky_relu => gt, mul_1, where # mul => mul # Graph fragment: # %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mul, leaky_relu], Original ATen: [aten.mul, aten.leaky_relu, aten.leaky_relu_backward] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf1, primals_1, primals_2, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class BatchNormLayer(nn.Module): """Implements batch normalization layer.""" def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon =1e-05): """Initializes with basic settings. Args: channels: Number of channels of the input tensor. gamma: Whether the scale (weight) of the affine mapping is learnable. beta: Whether the center (bias) of the affine mapping is learnable. decay: Decay factor for moving average operations in this layer. epsilon: A value added to the denominator for numerical stability. """ super().__init__() self.bn = nn.BatchNorm2d(num_features=channels, affine=True, track_running_stats=True, momentum=1 - decay, eps=epsilon) self.bn.weight.requires_grad = gamma self.bn.bias.requires_grad = beta def forward(self, x): return self.bn(x) class FirstBlock(nn.Module): """Implements the first block, which is a convolutional block.""" def __init__(self, in_channels, out_channels, use_wscale=False, wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'): super().__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=3, stride=1, padding=1, bias=False) self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3 ) if use_wscale else 1.0 self.bn = BatchNormLayer(channels=out_channels ) if use_bn else nn.Identity() if activation_type == 'linear': self.activate = nn.Identity() elif activation_type == 'lrelu': self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) else: raise NotImplementedError( f'Not implemented activation function: {activation_type}!') def forward(self, x): return self.activate(self.bn(self.conv(x) * self.scale)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf2 class BatchNormLayer(nn.Module): """Implements batch normalization layer.""" def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon =1e-05): """Initializes with basic settings. Args: channels: Number of channels of the input tensor. gamma: Whether the scale (weight) of the affine mapping is learnable. beta: Whether the center (bias) of the affine mapping is learnable. decay: Decay factor for moving average operations in this layer. epsilon: A value added to the denominator for numerical stability. """ super().__init__() self.bn = nn.BatchNorm2d(num_features=channels, affine=True, track_running_stats=True, momentum=1 - decay, eps=epsilon) self.bn.weight.requires_grad = gamma self.bn.bias.requires_grad = beta def forward(self, x): return self.bn(x) class FirstBlockNew(nn.Module): """Implements the first block, which is a convolutional block.""" def __init__(self, in_channels, out_channels, use_wscale=False, wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'): super().__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=3, stride=1, padding=1, bias=False) self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3 ) if use_wscale else 1.0 self.bn = BatchNormLayer(channels=out_channels ) if use_bn else nn.Identity() if activation_type == 'linear': self.activate = nn.Identity() elif activation_type == 'lrelu': self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) else: raise NotImplementedError( f'Not implemented activation function: {activation_type}!') def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
lelechen63/idinvert_pytorch
FirstBlock
false
10449
[ "MIT" ]
0
0469e1e5460ee4dd626c05bd35a83d52f9dc2cac
https://github.com/lelechen63/idinvert_pytorch/tree/0469e1e5460ee4dd626c05bd35a83d52f9dc2cac
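A minimal parity check between the eager FirstBlock and the Inductor-compiled FirstBlockNew above, using the shapes from get_inputs()/get_init_inputs(). This is a sketch: it assumes both class definitions are in scope and a CUDA device is available, and it copies the conv weight so the two modules are numerically comparable.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = FirstBlock(in_channels=4, out_channels=4).cuda()
compiled = FirstBlockNew(in_channels=4, out_channels=4).cuda()
with torch.no_grad():
    compiled.conv.weight.copy_(eager.conv.weight)
torch.testing.assert_close(eager(x), compiled(x))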
LastBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ey/cey4yc74rqvkmtcxtse2vt3dw6pfdi3zwtwezx7cdzkykhz4kzp7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # x_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, 
(4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf1, 16, grid=grid(16), stream=stream0) return (buf1, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class BatchNormLayer(nn.Module): """Implements batch normalization layer.""" def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon =1e-05): """Initializes with basic settings. Args: channels: Number of channels of the input tensor. gamma: Whether the scale (weight) of the affine mapping is learnable. beta: Whether the center (bias) of the affine mapping is learnable. decay: Decay factor for moving average operations in this layer. epsilon: A value added to the denominator for numerical stability. """ super().__init__() self.bn = nn.BatchNorm2d(num_features=channels, affine=True, track_running_stats=True, momentum=1 - decay, eps=epsilon) self.bn.weight.requires_grad = gamma self.bn.bias.requires_grad = beta def forward(self, x): return self.bn(x) class LastBlock(nn.Module): """Implements the last block, which is a dense block.""" def __init__(self, in_channels, out_channels, use_wscale=False, wscale_gain=1.0, use_bn=False): super().__init__() self.fc = nn.Linear(in_features=in_channels, out_features= out_channels, bias=False) self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0 self.bn = BatchNormLayer(channels=out_channels ) if use_bn else nn.Identity() def forward(self, x): x = x.view(x.shape[0], -1) x = self.fc(x) * self.scale x = x.view(x.shape[0], x.shape[1], 1, 1) return self.bn(x).view(x.shape[0], x.shape[1]) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, primals_1 class BatchNormLayer(nn.Module): """Implements batch normalization layer.""" def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon =1e-05): """Initializes with basic settings. Args: channels: Number of channels of the input tensor. gamma: Whether the scale (weight) of the affine mapping is learnable. beta: Whether the center (bias) of the affine mapping is learnable. decay: Decay factor for moving average operations in this layer. epsilon: A value added to the denominator for numerical stability. """ super().__init__() self.bn = nn.BatchNorm2d(num_features=channels, affine=True, track_running_stats=True, momentum=1 - decay, eps=epsilon) self.bn.weight.requires_grad = gamma self.bn.bias.requires_grad = beta def forward(self, x): return self.bn(x) class LastBlockNew(nn.Module): """Implements the last block, which is a dense block.""" def __init__(self, in_channels, out_channels, use_wscale=False, wscale_gain=1.0, use_bn=False): super().__init__() self.fc = nn.Linear(in_features=in_channels, out_features= out_channels, bias=False) self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0 self.bn = BatchNormLayer(channels=out_channels ) if use_bn else nn.Identity() def forward(self, input_0): primals_1 = self.fc.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
lelechen63/idinvert_pytorch
LastBlock
false
10450
[ "MIT" ]
0
0469e1e5460ee4dd626c05bd35a83d52f9dc2cac
https://github.com/lelechen63/idinvert_pytorch/tree/0469e1e5460ee4dd626c05bd35a83d52f9dc2cac
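A minimal smoke test for the compiled LastBlockNew above, again using the shapes from get_inputs()/get_init_inputs(). It assumes the class definition is in scope and a CUDA device is available; note that call() guards the (4, 4) input size and strides, so other shapes will fail its assertions.

import torch

block = LastBlockNew(in_channels=4, out_channels=4).cuda()
out = block(torch.rand(4, 4, device='cuda'))
print(out.shape)  # torch.Size([4, 4])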
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/w3/cw3egt7ajdde7mbqzrdxs4mdcaxj75b4l3brz5gbsf4yd73gbids.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 
+ (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/y2/cy2lwgz7dq2q2z4ifepdde4l7vyyvrwcx4zjn2ezmtzcanvhv374.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/dm/cdmk5acvohhdeyjsc6lsgsurbqis3xmgjftzfmanusy52dkygd23.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (512, 784), (784, 1)) assert_size_stride(primals_3, (512, ), (1, )) assert_size_stride(primals_4, (256, 512), (512, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (64, 256), (256, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (10, 64), (64, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 512), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 2048, grid=grid(2048), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 1024, grid=grid(1024), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 64), (1, 256), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = 
empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_9 return (buf6, primals_1, buf1, buf3, buf5, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class MLP(nn.Module): def __init__(self): super(MLP, self).__init__() self.fc1 = nn.Linear(28 * 28, 512) self.fc2 = nn.Linear(512, 256) self.fc3 = nn.Linear(256, 64) self.fc4 = nn.Linear(64, 10) self.dropout = nn.Dropout(0.1) def forward(self, x): x = x.view(-1, 28 * 28) x = F.relu(self.fc1(x)) self.dropout(x) x = F.relu(self.fc2(x)) self.dropout(x) x = F.relu(self.fc3(x)) self.dropout(x) x = self.fc4(x) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (512, 784), (784, 1))
    assert_size_stride(primals_3, (512,), (1,))
    assert_size_stride(primals_4, (256, 512), (512, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (64, 256), (256, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (10, 64), (64, 1))
    assert_size_stride(primals_9, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
            512), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 256), (
            1, 512), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(1024)](buf3, primals_5, 1024, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 64), (1,
            256), 0), out=buf4)
        buf5 = buf4
        del buf4
        triton_poi_fused_relu_2[grid(256)](buf5, primals_7, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
            (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf6)
        del primals_9
    return buf6, primals_1, buf1, buf3, buf5, primals_8, primals_6, primals_4


class MLPNew(nn.Module):

    def __init__(self):
        super(MLPNew, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 64)
        self.fc4 = nn.Linear(64, 10)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.fc4.weight
        primals_9 = self.fc4.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
lynfi/Classification
MLP
false
10451
[ "MIT" ]
0
691731629c6577432c8c9eee70b67911011a07b7
https://github.com/lynfi/Classification/tree/691731629c6577432c8c9eee70b67911011a07b7
CausalConv1d
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/fk/cfkrz7kxwztwd7brbtic3lrvcvxemi6b7x3jqgbfiyfesqxvlh2f.py
# Topologically Sorted Source Nodes: [pad, x], Original ATen: [aten.constant_pad_nd, aten.squeeze]
# Source node to ATen node mapping:
#   pad => constant_pad_nd
#   x => squeeze
# Graph fragment:
#   %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%unsqueeze, [3, 0, 0, 0], 0.0), kwargs = {})
#   %squeeze : [num_users=2] = call_function[target=torch.ops.aten.squeeze.dim](args = (%constant_pad_nd, 2), kwargs = {})
triton_poi_fused_constant_pad_nd_squeeze_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_squeeze_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_squeeze_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_squeeze_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = (-3) + x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.load(in_ptr0 + (x1), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
    tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv1d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%squeeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pad, x], Original ATen: [aten.constant_pad_nd, aten.squeeze]
        stream0 = get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_squeeze_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
        del primals_1
        # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
        del primals_3
    return (buf2, primals_2, buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class CausalConv1d(nn.Conv1d):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            dilation=1, groups=1, bias=True):
        super(CausalConv1d, self).__init__(in_channels, out_channels,
            kernel_size, stride=stride, padding=0, dilation=dilation,
            groups=groups, bias=bias)
        self.left_padding = dilation * (kernel_size - 1)

    def forward(self, input):
        x = F.pad(input.unsqueeze(2), (self.left_padding, 0, 0, 0)).squeeze(2)
        return super(CausalConv1d, self).forward(x)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_constant_pad_nd_squeeze_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = -3 + x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.load(in_ptr0 + x1, tmp2 & xmask, eviction_policy='evict_last',
        other=0.0)
    tl.store(out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_squeeze_0[grid(64)](primals_1,
            buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class CausalConv1dNew(nn.Conv1d):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            dilation=1, groups=1, bias=True):
        super(CausalConv1dNew, self).__init__(in_channels, out_channels,
            kernel_size, stride=stride, padding=0, dilation=dilation,
            groups=groups, bias=bias)
        self.left_padding = dilation * (kernel_size - 1)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
marc-moreaux/pytorch_text_generator
CausalConv1d
false
10452
[ "MIT" ]
0
99dd11c67d89f8a09faa28b7032fcc66f90672c0
https://github.com/marc-moreaux/pytorch_text_generator/tree/99dd11c67d89f8a09faa28b7032fcc66f90672c0
PositionwiseFeedForward
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   relu => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/ji/cji7mw45fbdoanjc5e6qu3e2bf5d6jnnjabskl6onjlk7uv7oqud.py
# Topologically Sorted Source Nodes: [add, output_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
#   add => add
#   output_2 => var_mean
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + (x0), tmp16, xmask)
    tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/xy/cxyvzp6lij7d3yqq2ut3vi6guk7xnzb7qwqb66dthlly44r65vfk.py
# Topologically Sorted Source Nodes: [add, output_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
#   add => add
#   output_2 => add_1, add_2, mul, mul_1, rsqrt, sub
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x2), xmask)
    tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, ), (1, ))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf6, 256, grid=grid(256), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [add, output_2], Original ATen: [aten.add, aten.native_layer_norm]
        triton_poi_fused_add_native_layer_norm_1.run(buf2, primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, output_2], Original ATen: [aten.add, aten.native_layer_norm]
        triton_poi_fused_add_native_layer_norm_2.run(buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0)
        del buf3
        del buf4
        del primals_7
    return (buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.distributions


class PositionwiseFeedForward(nn.Module):
    """Implements position-wise feedforward sublayer.

    FFN(x) = max(0, xW1 + b1)W2 + b2
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, x):
        residual = x
        output = self.w_2(F.relu(self.w_1(x)))
        output = self.dropout(output)
        output = self.layer_norm(output + residual)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_ff': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.distributions

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_1,
            buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2,
            primals_1, buf3, buf4, primals_6, primals_7, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf3
        del buf4
        del primals_7
    return buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (
        4, 1), 0), buf2, primals_4, buf6


class PositionwiseFeedForwardNew(nn.Module):
    """Implements position-wise feedforward sublayer.

    FFN(x) = max(0, xW1 + b1)W2 + b2
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForwardNew, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, input_0):
        primals_2 = self.w_1.weight
        primals_3 = self.w_1.bias
        primals_4 = self.w_2.weight
        primals_5 = self.w_2.bias
        primals_6 = self.layer_norm.weight
        primals_7 = self.layer_norm.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
greenstar1151/pytorch-benchmark
PositionwiseFeedForward
false
10453
[ "BSD-3-Clause" ]
0
8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
https://github.com/greenstar1151/pytorch-benchmark/tree/8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
Decoder
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/se/cse7sfgf5sgpbvm22egbkgr6e26lmuzgp3epjbv2y2kstod536nr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x => relu
# Graph fragment:
#   %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_2), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
#   %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/rj/crjqubz53turnzr3eytn5bygdu3aanhnbasxm5kv2343ef6srmqz.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv_transpose2d => convolution
#   x_2 => relu_1
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 12800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 25) % 128
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/7j/c7jlcpfayfgm4ionvsotdwcpvr26emperi3u36tmlugpw3iltf26.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv_transpose2d_1 => convolution_1
#   x_3 => relu_2
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 43264
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 169) % 64
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/d6/cd6lk7yvfcgve23ubphsppwnxecscgcqsmgwcnmcf7e736rcj3do.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv_transpose2d_2 => convolution_2
#   x_4 => relu_3
# Graph fragment:
#   %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 115200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 900) % 32
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/hs/chsgbq3hqknrxoqdmknsp4fh57w5pvfawifuk6rnybsj7hi324s7.py
# Topologically Sorted Source Nodes: [conv_transpose2d_3, reconstr], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
#   conv_transpose2d_3 => convolution_3
#   reconstr => sigmoid
# Graph fragment:
#   %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_sigmoid_4 = async_compile.triton('triton_poi_fused_convolution_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 128, 5, 5), (3200, 25, 5, 1))
    assert_size_stride(primals_5, (128, ), (1, ))
    assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
    assert_size_stride(primals_7, (64, ), (1, ))
    assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1))
    assert_size_stride(primals_9, (32, ), (1, ))
    assert_size_stride(primals_10, (32, 1, 6, 6), (36, 36, 6, 1))
    assert_size_stride(primals_11, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = buf0; del buf0  # reuse
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 16, grid=grid(16), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 0, 0), 0), primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 128, 5, 5), (3200, 25, 5, 1))
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [conv_transpose2d, x_2], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 12800, grid=grid(12800), stream=stream0)
        del primals_5
        # Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 64, 13, 13), (10816, 169, 13, 1))
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [conv_transpose2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 43264, grid=grid(43264), stream=stream0)
        del primals_7
        # Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 32, 30, 30), (28800, 900, 30, 1))
        buf7 = buf6; del buf6  # reuse
        # Topologically Sorted Source Nodes: [conv_transpose2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_3.run(buf7, primals_9, 115200, grid=grid(115200), stream=stream0)
        del primals_9
        # Topologically Sorted Source Nodes: [conv_transpose2d_3], Original ATen: [aten.convolution]
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf9 = buf8; del buf8  # reuse
        # Topologically Sorted Source Nodes: [conv_transpose2d_3, reconstr], Original ATen: [aten.convolution, aten.sigmoid]
        triton_poi_fused_convolution_sigmoid_4.run(buf9, primals_11, 16384, grid=grid(16384), stream=stream0)
        del primals_11
    return (buf9, primals_3, primals_4, primals_6, primals_8, primals_10, reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf5, buf7, buf9, buf10, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((64, 32, 6, 6), (1152, 36, 6, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((32, 1, 6, 6), (36, 36, 6, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Decoder(nn.Module):

    def __init__(self, latent_size, m):
        super(Decoder, self).__init__()
        self.latent_size = latent_size
        self.fc = nn.Linear(latent_size, m)
        self.deconv1 = nn.ConvTranspose2d(m, 128, 5, stride=2)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
        self.deconv4 = nn.ConvTranspose2d(32, 1, 6, stride=2)

    def forward(self, x):
        x = torch.relu(self.fc(x))
        x = x.unsqueeze(-1).unsqueeze(-1)
        x = torch.relu(self.deconv1(x))
        x = torch.relu(self.deconv2(x))
        x = torch.relu(self.deconv3(x))
        reconstr = torch.sigmoid(self.deconv4(x))
        return reconstr


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'latent_size': 4, 'm': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 12800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 43264
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 169 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 115200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 900 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 128, 5, 5), (3200, 25, 5, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1))
    assert_size_stride(primals_9, (32,), (1,))
    assert_size_stride(primals_10, (32, 1, 6, 6), (36, 36, 6, 1))
    assert_size_stride(primals_11, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = buf0
        del buf0
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1,
            primals_2, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 4,
            1, 1), (4, 1, 0, 0), 0), primals_4, stride=(2, 2), padding=(0,
            0), dilation=(1, 1), transposed=True, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf2, (4, 128, 5, 5), (3200, 25, 5, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_1[grid(12800)](buf3, primals_5,
            12800, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 64, 13, 13), (10816, 169, 13, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(43264)](buf5, primals_7,
            43264, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_7
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 32, 30, 30), (28800, 900, 30, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_3[grid(115200)](buf7, primals_9,
            115200, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_sigmoid_4[grid(16384)](buf9,
            primals_11, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    return (buf9, primals_3, primals_4, primals_6, primals_8, primals_10,
        reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf3,
        buf5, buf7, buf9, buf10)


class DecoderNew(nn.Module):

    def __init__(self, latent_size, m):
        super(DecoderNew, self).__init__()
        self.latent_size = latent_size
        self.fc = nn.Linear(latent_size, m)
        self.deconv1 = nn.ConvTranspose2d(m, 128, 5, stride=2)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
        self.deconv4 = nn.ConvTranspose2d(32, 1, 6, stride=2)

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_4 = self.deconv1.weight
        primals_5 = self.deconv1.bias
        primals_6 = self.deconv2.weight
        primals_7 = self.deconv2.bias
        primals_8 = self.deconv3.weight
        primals_9 = self.deconv3.bias
        primals_10 = self.deconv4.weight
        primals_11 = self.deconv4.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
lshoek/creative-evo-controller
Decoder
false
10454
[ "MIT" ]
0
a5f1742c172255cca2338b76ae1c5b4db277bb0d
https://github.com/lshoek/creative-evo-controller/tree/a5f1742c172255cca2338b76ae1c5b4db277bb0d
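The Decoder record above pairs the eager module with its Inductor-compiled wrapper, DecoderNew. Below is a minimal smoke test, added for illustration and not part of the record; it assumes DecoderNew is importable, a CUDA device is available, and latent_size = m = 4, since those are the shapes hard-coded into the assert_size_stride guards of call() (a (4, 4) latent batch in, a (4, 1, 64, 64) sigmoid image out).

import torch

# Hedged smoke test for the compiled DecoderNew (assumed importable from the
# record above). The guards in call() fix both batch size and latent size to 4.
if torch.cuda.is_available():
    model = DecoderNew(latent_size=4, m=4).cuda()
    z = torch.randn(4, 4, device='cuda')   # (batch, latent_size)
    img = model(z)
    assert img.shape == (4, 1, 64, 64)     # output shape guarded in call()
    assert img.min().item() >= 0.0 and img.max().item() <= 1.0  # sigmoid range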
TemporalConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6s/c6saluonoqfxerrd3p7h7p6inhepj24zjy54kxuetr27abkw2rto.py # Topologically Sorted Source Nodes: [P, conv2d_1, conv2d_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # P => convolution # conv2d_1 => convolution_1 # conv2d_2 => convolution_2 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 4 x3 = (xindex // 4) y0 = yindex % 4 y1 = (yindex // 4) x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x3) + (16*x2) + (64*y1)), xmask & ymask) tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask) tl.store(out_ptr1 + (x5 + (16*y4)), tmp0, xmask & ymask) tl.store(out_ptr2 + (x5 + (16*y4)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ba/cbaejeje4ctf6bizvw6ak33yknc5bygfyu5seecmofky7uxsvxmu.py # Topologically Sorted Source Nodes: [P, conv2d_1, Q, PQ, conv2d_2, add_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # P => convolution # PQ => add # Q => sigmoid # add_1 => add_1 # conv2d_1 => convolution_1 # conv2d_2 => convolution_2 # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %sigmoid), kwargs = {}) # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convolution_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 8) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x3), xmask) tmp9 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp2) tmp7 = tmp5 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = 0.0 tmp15 = tmp13 <= tmp14 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp13, xmask) tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [P, conv2d_1, conv2d_2], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, buf2, buf5, 16, 16, grid=grid(16, 16), stream=stream0) # Topologically Sorted Source Nodes: [P], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 2), (32, 8, 2, 1)) del buf0 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 2), (32, 8, 2, 1)) del buf2 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 2), (32, 8, 2, 1)) del buf5 buf4 = buf3; del buf3 # reuse buf7 = buf1; del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [P, conv2d_1, Q, PQ, conv2d_2, add_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1.run(buf4, buf7, primals_5, primals_3, buf6, primals_7, buf8, 128, grid=grid(128), stream=stream0) del buf6 del primals_3 del primals_5 del primals_7 return (reinterpret_tensor(buf7, (4, 2, 4, 4), (32, 1, 2, 8), 0), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 4, 16), 0), buf4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class TemporalConv(nn.Module): """Temporal convolution block applied to nodes in the STGCN Layer For details see: `"Spatio-Temporal Graph Convolutional Networks: A Deep Learning Framework for Traffic Forecasting" <https://arxiv.org/abs/1709.04875>`_ Based off the temporal convolution introduced in "Convolutional Sequence to Sequence Learning" <https://arxiv.org/abs/1709.04875>`_ NB. Given an input sequence of length m and a kernel size of k the output sequence will have length m-(k-1) Args: in_channels (int): Number of input features. out_channels (int): Number of output features. kernel_size (int): Convolutional kernel size. """ def __init__(self, in_channels, out_channels, kernel_size=3): super(TemporalConv, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv2 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv3 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) def forward(self, X): """Forward pass through temporal convolution block Args: X (torch.Tensor): Input data of shape (batch_size, input_time_steps, num_nodes, in_channels) """ X = X.permute(0, 3, 2, 1) P = self.conv1(X) Q = torch.sigmoid(self.conv2(X)) PQ = P + Q out = F.relu(PQ + self.conv3(X)) out = out.permute(0, 3, 2, 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 4 x3 = xindex // 4 y0 = yindex % 4 y1 = yindex // 4 x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask) tl.store(out_ptr1 + (x5 + 16 * y4), tmp0, xmask & ymask) tl.store(out_ptr2 + (x5 + 16 * y4), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 8 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x3, xmask) tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp2) tmp7 = tmp5 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = 0.0 tmp15 = tmp13 <= tmp14 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp13, xmask) tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 16)](primals_1, buf0, buf2, buf5, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 2), (32, 8, 2, 1)) del buf0 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 
2), (32, 8, 2, 1)) del buf2 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 2), (32, 8, 2, 1)) del buf5 buf4 = buf3 del buf3 buf7 = buf1 del buf1 buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1[grid (128)](buf4, buf7, primals_5, primals_3, buf6, primals_7, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_3 del primals_5 del primals_7 return reinterpret_tensor(buf7, (4, 2, 4, 4), (32, 1, 2, 8), 0 ), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, ( 4, 4, 4, 4), (64, 1, 4, 16), 0), buf4, buf8 class TemporalConvNew(nn.Module): """Temporal convolution block applied to nodes in the STGCN Layer For details see: `"Spatio-Temporal Graph Convolutional Networks: A Deep Learning Framework for Traffic Forecasting" <https://arxiv.org/abs/1709.04875>`_ Based off the temporal convolution introduced in "Convolutional Sequence to Sequence Learning" <https://arxiv.org/abs/1709.04875>`_ NB. Given an input sequence of length m and a kernel size of k the output sequence will have length m-(k-1) Args: in_channels (int): Number of input features. out_channels (int): Number of output features. kernel_size (int): Convolutional kernel size. """ def __init__(self, in_channels, out_channels, kernel_size=3): super(TemporalConvNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv2 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv3 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
marcdemers/pytorch_geometric_temporal
TemporalConv
false
10455
[ "MIT" ]
0
446aadcd890158bade2e9974f9840ed5a7bba827
https://github.com/marcdemers/pytorch_geometric_temporal/tree/446aadcd890158bade2e9974f9840ed5a7bba827
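The TemporalConv record computes P = conv1(X), adds the sigmoid gate Q = sigmoid(conv2(X)) and a third conv3 path, then applies ReLU; the fused Triton kernel above evaluates the sigmoid, both adds, the ReLU, and the backward mask in a single pass. A hedged length check is sketched below (illustration only; it assumes the eager TemporalConv class above is importable), exercising the documented contract that m input time steps with kernel size k yield m - (k - 1) output steps. Incidentally, the docstring's second citation repeats the STGCN URL; the paper it names, "Convolutional Sequence to Sequence Learning", is arXiv:1705.03122.

import torch

# Input layout follows the forward() docstring:
# (batch_size, input_time_steps, num_nodes, in_channels).
block = TemporalConv(in_channels=4, out_channels=4, kernel_size=3)
X = torch.rand(4, 4, 4, 4)                  # 4 time steps, kernel size 3
out = block(X)
assert out.shape == (4, 4 - (3 - 1), 4, 4)  # time axis shrinks from 4 to 2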
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/fp/cfp5jrxxyxrvhcpoq5tio3p5tkhj5ugdrpyur3x4v6meatzih7jn.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/lf/clf45mspfcg7t5x4om2snxq42eoe4jywsisc72sbpggbkipki6jb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/j4/cj4ubfzqedljpxnezgsaqlcbmis75oaa7j6dddwnyaytki3in5gp.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu] # Source node to ATen node mapping: # out => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qo/cqofti6gsrehqlmzbkmbxoumphvdsb6r5tb6hgxmdqpgruph2ym4.py # Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # out_2 => relu_2 # out_3 => relu_3 # out_4 => add # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_2, %relu_3), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, 
ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tmp8 = tmp2 <= tmp6 tl.store(out_ptr0 + (y0 + (16*x2) + (4096*y1)), tmp5, xmask & ymask) tl.store(out_ptr1 + (x2 + (256*y3)), tmp7, xmask & ymask) tl.store(out_ptr2 + (x2 + (256*y3)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_4, (256, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_5, (256, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_2, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 4, 4), (2048, 1, 512, 128)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf3, 8192, grid=grid(8192), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 4, 4), (2048, 1, 512, 128)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf5, 8192, grid=grid(8192), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256)) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf0, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf8 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), 
torch.float32) buf9 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool) buf10 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool) # Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_3.run(buf6, buf7, buf8, buf9, buf10, 64, 256, grid=grid(64, 256), stream=stream0) del buf6 del buf7 return (buf8, primals_1, buf0, buf1, primals_4, primals_5, buf3, buf5, buf9, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init as init class conv_relu(nn.Module): """docstring for conv_relu""" def __init__(self, in_channels, out_channels, **kwargs): super(conv_relu, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) def forward(self, x): out = F.relu(self.conv(x), inplace=True) return out class ResBlock(nn.Module): """docstring for ResBlock""" def __init__(self, in_channels): super(ResBlock, self).__init__() self.res1a = conv_relu(in_channels, 128, kernel_size=1) self.res1b = conv_relu(128, 128, kernel_size=3, padding=1) self.res1c = conv_relu(128, 256, kernel_size=1) self.res2a = conv_relu(in_channels, 256, kernel_size=1) def forward(self, x): out1 = self.res1a(x) out1 = self.res1b(out1) out1 = self.res1c(out1) out2 = self.res2a(x) out = out1 + out2 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + 256 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tmp8 = tmp2 <= tmp6 tl.store(out_ptr0 + (y0 + 16 * x2 + 4096 * y1), tmp5, xmask & ymask) tl.store(out_ptr1 + (x2 + 256 * y3), tmp7, xmask & ymask) tl.store(out_ptr2 + (x2 + 256 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_4, (256, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_5, (256, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) 
triton_poi_fused_0[grid(16, 16)](primals_2, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_1[grid(16384, 9)](primals_3, buf1, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 4, 4), (2048, 1, 512, 128)) buf3 = buf2 del buf2 triton_poi_fused_relu_2[grid(8192)](buf3, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 4, 4), (2048, 1, 512, 128)) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(8192)](buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf7 = extern_kernels.convolution(buf0, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf8 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) buf9 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool) buf10 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool) triton_poi_fused_add_relu_threshold_backward_3[grid(64, 256)](buf6, buf7, buf8, buf9, buf10, 64, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf6 del buf7 return (buf8, primals_1, buf0, buf1, primals_4, primals_5, buf3, buf5, buf9, buf10) class conv_relu(nn.Module): """docstring for conv_relu""" def __init__(self, in_channels, out_channels, **kwargs): super(conv_relu, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) def forward(self, x): out = F.relu(self.conv(x), inplace=True) return out class ResBlockNew(nn.Module): """docstring for ResBlock""" def __init__(self, in_channels): super(ResBlockNew, self).__init__() self.res1a = conv_relu(in_channels, 128, kernel_size=1) self.res1b = conv_relu(128, 128, kernel_size=3, padding=1) self.res1c = conv_relu(128, 256, kernel_size=1) self.res2a = conv_relu(in_channels, 256, kernel_size=1) def forward(self, input_0): primals_1 = self.res1a.conv.weight primals_3 = self.res1b.conv.weight primals_4 = self.res1c.conv.weight primals_5 = self.res2a.conv.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
llpspark/PytorchToCaffe
ResBlock
false
10456
[ "MIT" ]
0
01f6fb2cfd42e2c06ae5d46a7a91f7fd6d40d5d1
https://github.com/llpspark/PytorchToCaffe/tree/01f6fb2cfd42e2c06ae5d46a7a91f7fd6d40d5d1
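The ResBlock record shows Inductor's layout handling: triton_poi_fused_0 and triton_poi_fused_1 transpose the input and the 3x3 weight into channels-last strides before the extern convolution calls, and the final kernel fuses the two branch ReLUs, their sum, and the boolean masks kept for the backward pass. A hedged equivalence check is sketched below (illustration only; it assumes ResBlock and ResBlockNew from the record are importable and CUDA is available): after sharing weights, the compiled wrapper should match the eager module to float tolerance.

import torch

eager = ResBlock(in_channels=4).cuda().eval()
compiled = ResBlockNew(in_channels=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())   # submodule names line up exactly
x = torch.rand(4, 4, 4, 4, device='cuda')      # shape fixed by the call() guards
with torch.no_grad():
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)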
PairwiseRankingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/eq/ceqhkwm2d735ddv4kryb45afnds5mdj2jfkqyfgb53xuu6ykceh3.py # Topologically Sorted Source Nodes: [sub, add, clamp, cost_sent, sub_1, add_1, clamp_1, cost_img, loss], Original ATen: [aten.rsub, aten.add, aten.clamp, aten.sum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # clamp => clamp_min # clamp_1 => clamp_min_1 # cost_img => sum_2 # cost_sent => sum_1 # loss => add_2 # sub => sub # sub_1 => sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %arg2_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %arg3_1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {}) triton_per_fused_add_clamp_rsub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp10 = tl.load(in_ptr2 + (r0), None) tmp12 = tl.load(in_ptr3 + (r0), None) tmp1 = 4.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp11 = tmp1 - tmp10 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp13, tmp5) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = tmp9 + tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, add, clamp, cost_sent, sub_1, add_1, clamp_1, cost_img, loss], Original ATen: [aten.rsub, aten.add, aten.clamp, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_rsub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class PairwiseRankingLoss(nn.Module): """ Pairwise ranking loss """ def __init__(self, margin): super(PairwiseRankingLoss, self).__init__() self.margin = margin def forward(self, anchor1, anchor2, img_sentc, sent_imgc): cost_sent = torch.clamp(self.margin - anchor1 + img_sentc, min=0.0 ).sum() cost_img = torch.clamp(self.margin - anchor2 + sent_imgc, min=0.0).sum( ) loss = cost_sent + cost_img return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'margin': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_ptr2 + r0, None) tmp12 = tl.load(in_ptr3 + r0, None) tmp1 = 4.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp11 = tmp1 - tmp10 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp13, tmp5) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = tmp9 + tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_rsub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class PairwiseRankingLossNew(nn.Module): """ Pairwise ranking loss """ def __init__(self, margin): super(PairwiseRankingLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
maksimovVva/SentEval
PairwiseRankingLoss
false
10457
[ "BSD-3-Clause" ]
0
d3aa5f24dd84b48ea476e73f4b59a4e1ace7775c
https://github.com/maksimovVva/SentEval/tree/d3aa5f24dd84b48ea476e73f4b59a4e1ace7775c
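The PairwiseRankingLoss record collapses both hinge terms, their clamps, and the final sums into a single persistent-reduction kernel launch. Note that the kernel bakes the margin in as the literal 4.0 (tmp1), so the compiled wrapper is specialized to margin=4 and to the (4, 4, 4, 4) input shapes in the guards. A hedged reference check is sketched below (illustration only; it assumes PairwiseRankingLossNew above is importable and CUDA is available), comparing the fused result against the eager two-term formula.

import torch

loss_fn = PairwiseRankingLossNew(margin=4)   # must match the baked-in 4.0
a1, a2, img_sentc, sent_imgc = (torch.rand(4, 4, 4, 4, device='cuda')
                                for _ in range(4))
fused = loss_fn(a1, a2, img_sentc, sent_imgc)
eager = ((4 - a1 + img_sentc).clamp(min=0).sum()
         + (4 - a2 + sent_imgc).clamp(min=0).sum())
torch.testing.assert_close(fused, eager)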
DiscreteNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/y7/cy7463cf27qpra2f6ndigmu6ve4q6o3cbvgetgqehejtevv6yfa5.py # Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.hardtanh, aten.hardtanh_backward] # Source node to ATen node mapping: # pi1 => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_1, 0), kwargs = {}) # %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 6), kwargs = {}) # %bitwise_or_1 : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le_1, %ge_1), kwargs = {}) triton_poi_fused_hardtanh_hardtanh_backward_0 = async_compile.triton('triton_poi_fused_hardtanh_hardtanh_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_hardtanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_hardtanh_hardtanh_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3h/c3hkfz2cekpaaeiqcpxcl3mxsxlmofvggknydsueowa7yt644s4y.py # Topologically Sorted Source Nodes: [v1], Original ATen: [aten.hardtanh, aten.hardtanh_backward] # Source node to ATen node mapping: # v1 => clamp_max_1, clamp_min_1 # Graph fragment: # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_5, 0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 6), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_5, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_5, 6), kwargs = {}) # %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {}) triton_poi_fused_hardtanh_hardtanh_backward_1 = async_compile.triton('triton_poi_fused_hardtanh_hardtanh_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_hardtanh_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_hardtanh_hardtanh_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x3 = (xindex // 1600) x5 = xindex % 1600 tmp0 
= tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + (x4), tmp6, xmask) tl.store(out_ptr1 + (x5 + (1664*x3)), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 200), (200, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (100, 4), (4, 1)) assert_size_stride(primals_7, (100, ), (1, )) assert_size_stride(primals_8, (1, 100), (100, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) # Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.hardtanh, aten.hardtanh_backward] stream0 = get_raw_stream(0) triton_poi_fused_hardtanh_hardtanh_backward_0.run(buf0, primals_2, buf1, buf8, 12800, grid=grid(12800), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 100), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [v1], Original ATen: [aten.hardtanh, aten.hardtanh_backward] triton_poi_fused_hardtanh_hardtanh_backward_1.run(buf3, primals_7, buf4, buf7, 6400, grid=grid(6400), stream=stream0) del buf3 del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [values], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(buf4, (64, 100), (100, 1), 0), primals_8, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((200, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


def set_init(layers):
    for layer in layers:
        nn.init.normal_(layer.weight, mean=0.0, std=0.1)
        nn.init.constant_(layer.bias, 0.0)


class DiscreteNet(nn.Module):

    def __init__(self, s_dim, a_dim):
        super(DiscreteNet, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        self.pi1 = nn.Linear(s_dim, 200)
        self.pi2 = nn.Linear(200, a_dim)
        self.v1 = nn.Linear(s_dim, 100)
        self.v2 = nn.Linear(100, 1)
        set_init([self.pi1, self.pi2, self.v1, self.v2])
        self.distribution = torch.distributions.Categorical

    def forward(self, x):
        pi1 = F.relu6(self.pi1(x))
        logits = self.pi2(pi1)
        v1 = F.relu6(self.v1(x))
        values = self.v2(v1)
        return logits, values

    def choose_action(self, s):
        self.eval()
        logits, _ = self.forward(s)
        prob = F.softmax(logits, dim=1).data
        m = self.distribution(prob)
        return m.sample().numpy()[0]

    def loss_func(self, s, a, v_t):
        self.train()
        logits, values = self.forward(s)
        td = v_t - values
        c_loss = td.pow(2)
        probs = F.softmax(logits, dim=1)
        m = self.distribution(probs)
        exp_v = m.log_prob(a) * td.detach().squeeze()
        a_loss = -exp_v
        total_loss = (c_loss + a_loss).mean()
        return total_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'s_dim': 4, 'a_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_hardtanh_hardtanh_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_hardtanh_hardtanh_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x3 = xindex // 1600 x5 = xindex % 1600 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + x4, tmp6, xmask) tl.store(out_ptr1 + (x5 + 1664 * x3), tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 200), (200, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (100, 4), (4, 1)) assert_size_stride(primals_7, (100,), (1,)) assert_size_stride(primals_8, (1, 100), (100, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) get_raw_stream(0) triton_poi_fused_hardtanh_hardtanh_backward_0[grid(12800)](buf0, primals_2, buf1, buf8, 12800, XBLOCK=256, num_warps=4, num_stages=1 ) del buf0 del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 100), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1), torch.float32) buf7 = 
empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) triton_poi_fused_hardtanh_hardtanh_backward_1[grid(6400)](buf3, primals_7, buf4, buf7, 6400, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 200), (200, 1), 0 ), reinterpret_tensor(buf4, (64, 100), (100, 1), 0 ), primals_8, buf7, primals_4, buf8 def set_init(layers): for layer in layers: nn.init.normal_(layer.weight, mean=0.0, std=0.1) nn.init.constant_(layer.bias, 0.0) class DiscreteNetNew(nn.Module): def __init__(self, s_dim, a_dim): super(DiscreteNetNew, self).__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, 200) self.pi2 = nn.Linear(200, a_dim) self.v1 = nn.Linear(s_dim, 100) self.v2 = nn.Linear(100, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = torch.distributions.Categorical def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v total_loss = (c_loss + a_loss).mean() return total_loss def forward(self, input_0): primals_1 = self.pi1.weight primals_2 = self.pi1.bias primals_4 = self.pi2.weight primals_5 = self.pi2.bias primals_6 = self.v1.weight primals_7 = self.v1.bias primals_8 = self.v2.weight primals_9 = self.v2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
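A quick way to sanity-check records like this one is to run the eager module and the Inductor-generated wrapper side by side on the same weights. The sketch below is ours, not part of the record: it assumes a CUDA device and that DiscreteNet and DiscreteNetNew above are importable, and the parity_check helper name is hypothetical.

import torch

def parity_check(atol=1e-5):
    # Build both modules with the record's init args, copy the eager
    # weights into the compiled wrapper, and compare the two outputs.
    torch.manual_seed(0)
    eager = DiscreteNet(s_dim=4, a_dim=4).cuda()
    compiled = DiscreteNetNew(s_dim=4, a_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    logits_e, values_e = eager(x)
    logits_c, values_c = compiled(x)
    assert torch.allclose(logits_e, logits_c, atol=atol)
    assert torch.allclose(values_e, values_c, atol=atol)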
lws803/pytorch-A3C
DiscreteNet
false
10,458
[ "MIT" ]
0
944e7f42a8fa54b7d6efbe169d8a3467b20a0f7f
https://github.com/lws803/pytorch-A3C/tree/944e7f42a8fa54b7d6efbe169d8a3467b20a0f7f
NoiseLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/hy/chy2gdvcdlpxio6r2ezu76sbqj6jaum4snkq6izi5zxkdqh3u2yj.py # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # x => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %randn), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 
64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.randn] buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class NoiseLayer(nn.Module):
    """adds noise. noise is per pixel (constant over channels) with per-channel weight"""

    def __init__(self, channels):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(channels))
        self.noise = None

    def forward(self, x, noise=None):
        if noise is None and self.noise is None:
            noise = torch.randn(x.size(0), 1, x.size(2), x.size(3),
                device=x.device, dtype=x.dtype)
        elif noise is None:
            noise = self.noise
        x = x + self.weight.view(1, -1, 1, 1) * noise
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class NoiseLayerNew(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
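Note that the compiled NoiseLayerNew.forward takes only the activation tensor: the eager module's optional noise argument and its self.noise branch are not part of the traced graph, which instead draws fresh noise on every call (the aten.randn node above). Eagerly, the fused add-mul kernel corresponds to the short sketch below; shapes are taken from this record's get_inputs/get_init_inputs, and CUDA is assumed.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')       # activations (N, C, H, W)
weight = torch.zeros(4, device='cuda')          # per-channel weight
noise = torch.randn(4, 1, 4, 4, device='cuda')  # per-pixel, shared across channels
out = x + weight.view(1, -1, 1, 1) * noise      # what triton_poi_fused_add_mul_0 computes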
justinpinkney/ganspace
NoiseLayer
false
10,459
[ "Apache-2.0" ]
0
7dc76d1d2ddad21d946a7ceb375efe5d5316fb3f
https://github.com/justinpinkney/ganspace/tree/7dc76d1d2ddad21d946a7ceb375efe5d5316fb3f
mlp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cv/ccvagufpv2m2vfo3w35yjibzs4ygygyoilwi2jrpbsvlftqb6nzr.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = 
tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (2048, 4), (4, 1)) assert_size_stride(primals_3, (2048, ), (1, )) assert_size_stride(primals_4, (2048, 2048), (2048, 1)) assert_size_stride(primals_5, (2048, ), (1, )) assert_size_stride(primals_6, (4, 2048), (2048, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 8192, grid=grid(8192), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_7 return (buf4, primals_1, buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2048, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class mlp(nn.Module):

    def __init__(self, seq_len):
        super(mlp, self).__init__()
        self.lin1 = nn.Linear(seq_len, 2048)
        self.lin2 = nn.Linear(2048, 2048)
        self.lin3 = nn.Linear(2048, seq_len)
        self.relu = nn.ReLU()

    def forward(self, input_):
        input_ = input_.reshape(input_.size(0), -1)
        out = self.lin1(input_)
        out = self.lin2(self.relu(out))
        out = self.lin3(self.relu(out))
        return out.view(input_.size(0), -1)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'seq_len': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (2048, 4), (4, 1)) assert_size_stride(primals_3, (2048,), (1,)) assert_size_stride(primals_4, (2048, 2048), (2048, 1)) assert_size_stride(primals_5, (2048,), (1,)) assert_size_stride(primals_6, (4, 2048), (2048, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 2048 ), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(8192)](buf1, primals_3, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(8192)](buf3, primals_5, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class mlpNew(nn.Module): def __init__(self, seq_len): super(mlpNew, self).__init__() self.lin1 = nn.Linear(seq_len, 2048) self.lin2 = nn.Linear(2048, 2048) self.lin3 = nn.Linear(2048, seq_len) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.lin1.weight primals_3 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_6 = self.lin3.weight primals_7 = self.lin3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
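Both hidden layers in this record are served by the same triton_poi_fused_relu_0 kernel: each matmul produces a (4, 2048) buffer, i.e. 8192 elements, and the kernel fuses the bias add with the ReLU, writing the result in place over the matmul output (in_out_ptr0). As a sketch, one kernel launch is equivalent to the eager code below (buffer and bias shapes from the record; CUDA assumed).

import torch

buf = torch.randn(4, 2048, device='cuda')  # stands in for the extern mm output
bias = torch.zeros(2048, device='cuda')
buf.add_(bias).clamp_(min=0)               # in-place bias add + ReLU, like the fused kernel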
liuziyang1106/sodeep
mlp
false
10,460
[ "BSD-3-Clause-Clear" ]
0
47f8a5cbe5b8405624877efc81cb28f104f1e2d7
https://github.com/liuziyang1106/sodeep/tree/47f8a5cbe5b8405624877efc81cb28f104f1e2d7
GetSegPred
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ws/cwsbiv2j3g4xvieopa3bkoe5ikyusgpeusghiciceorbcngdbj6v.py # Topologically Sorted Source Nodes: [add, mul, sub, round_1, temp_cloud, setitem], Original ATen: [aten.add, aten.mul, aten.sub, aten.round, aten._to_copy, aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # add => add # mul => mul # round_1 => round_1 # setitem => full_default, index_put # sub => sub # temp_cloud => convert_element_type # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.501), kwargs = {}) # %round_1 : [num_users=1] = call_function[target=torch.ops.aten.round.default](args = (%sub,), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%round_1, torch.int64), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=12] = call_function[target=torch.ops.aten.index_put_.default](args = (%convert_element_type, [%eq], %full_default), kwargs = {}) triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0 = async_compile.triton('triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 * tmp3 tmp5 = 0.501 tmp6 = tmp4 - tmp5 tmp7 = libdevice.nearbyint(tmp6) tmp8 = tmp7.to(tl.int64) tmp9 = tl.full([1], -1, tl.int64) tmp10 = tmp8 == tmp9 tmp11 = tl.full([1], 0, tl.int64) tmp12 = tl.where(tmp10, tmp11, tmp8) tl.store(out_ptr0 + (x0), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wy/cwy5lnusqcz7skukmacggq3zuqyx7pqh6vt43oihdsykv4kmxgg4.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3],), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x0 = xindex % 4 x5 = xindex tmp0 
= x3 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 4, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tl.device_assert(((0 <= tl.broadcast_to(tmp9, [XBLOCK])) & (tl.broadcast_to(tmp9, [XBLOCK]) < 4)) | ~(tmp4 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp9, [XBLOCK]) < 4") tmp11 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp6 tmp13 = tmp11 < 0 tmp14 = tl.where(tmp13, tmp12, tmp11) tl.device_assert(((0 <= tl.broadcast_to(tmp14, [XBLOCK])) & (tl.broadcast_to(tmp14, [XBLOCK]) < 4)) | ~(tmp4 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp14, [XBLOCK]) < 4") tmp16 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp6 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tl.device_assert(((0 <= tl.broadcast_to(tmp19, [XBLOCK])) & (tl.broadcast_to(tmp19, [XBLOCK]) < 4)) | ~(tmp4 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp19, [XBLOCK]) < 4") tmp21 = tl.load(in_ptr1 + (tmp9 + (4*tmp19) + (16*tmp14) + (64*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 2, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp26 = tl.load(in_ptr0 + (64 + x1 + (16*x2)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tmp26 + tmp6 tmp28 = tmp26 < 0 tmp29 = tl.where(tmp28, tmp27, tmp26) tl.device_assert(((0 <= tl.broadcast_to(tmp29, [XBLOCK])) & (tl.broadcast_to(tmp29, [XBLOCK]) < 4)) | ~(tmp25 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp29, [XBLOCK]) < 4") tmp31 = tl.load(in_ptr0 + (68 + x1 + (16*x2)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 + tmp6 tmp33 = tmp31 < 0 tmp34 = tl.where(tmp33, tmp32, tmp31) tl.device_assert(((0 <= tl.broadcast_to(tmp34, [XBLOCK])) & (tl.broadcast_to(tmp34, [XBLOCK]) < 4)) | ~(tmp25 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp34, [XBLOCK]) < 4") tmp36 = tl.load(in_ptr0 + (72 + x1 + (16*x2)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp36 + tmp6 tmp38 = tmp36 < 0 tmp39 = tl.where(tmp38, tmp37, tmp36) tl.device_assert(((0 <= tl.broadcast_to(tmp39, [XBLOCK])) & (tl.broadcast_to(tmp39, [XBLOCK]) < 4)) | ~(tmp25 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp39, [XBLOCK]) < 4") tmp41 = tl.load(in_ptr1 + (256 + tmp29 + (4*tmp39) + (16*tmp34) + (64*x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tmp0 >= tmp23 tmp43 = tl.full([1], 3, tl.int64) tmp44 = tmp0 < tmp43 tmp45 = tmp42 & tmp44 tmp46 = tl.load(in_ptr0 + (128 + x1 + (16*x2)), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp46 + tmp6 tmp48 = tmp46 < 0 tmp49 = tl.where(tmp48, tmp47, tmp46) tl.device_assert(((0 <= tl.broadcast_to(tmp49, [XBLOCK])) & (tl.broadcast_to(tmp49, [XBLOCK]) < 4)) | ~(tmp45 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp49, [XBLOCK]) < 4") tmp51 = tl.load(in_ptr0 + (132 + x1 + (16*x2)), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp52 = tmp51 + tmp6 tmp53 = tmp51 < 0 tmp54 = tl.where(tmp53, tmp52, tmp51) tl.device_assert(((0 <= tl.broadcast_to(tmp54, [XBLOCK])) & (tl.broadcast_to(tmp54, [XBLOCK]) < 4)) | ~(tmp45 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp54, [XBLOCK]) < 4") tmp56 = tl.load(in_ptr0 + (136 + x1 + (16*x2)), 
tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tmp56 + tmp6 tmp58 = tmp56 < 0 tmp59 = tl.where(tmp58, tmp57, tmp56) tl.device_assert(((0 <= tl.broadcast_to(tmp59, [XBLOCK])) & (tl.broadcast_to(tmp59, [XBLOCK]) < 4)) | ~(tmp45 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp59, [XBLOCK]) < 4") tmp61 = tl.load(in_ptr1 + (512 + tmp49 + (4*tmp59) + (16*tmp54) + (64*x0)), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp62 = tmp0 >= tmp43 tmp63 = tl.full([1], 4, tl.int64) tmp64 = tmp0 < tmp63 tmp65 = tl.load(in_ptr0 + (192 + x1 + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp66 = tmp65 + tmp6 tmp67 = tmp65 < 0 tmp68 = tl.where(tmp67, tmp66, tmp65) tl.device_assert(((0 <= tl.broadcast_to(tmp68, [XBLOCK])) & (tl.broadcast_to(tmp68, [XBLOCK]) < 4)) | ~(tmp62 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp68, [XBLOCK]) < 4") tmp70 = tl.load(in_ptr0 + (196 + x1 + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp71 = tmp70 + tmp6 tmp72 = tmp70 < 0 tmp73 = tl.where(tmp72, tmp71, tmp70) tl.device_assert(((0 <= tl.broadcast_to(tmp73, [XBLOCK])) & (tl.broadcast_to(tmp73, [XBLOCK]) < 4)) | ~(tmp62 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp73, [XBLOCK]) < 4") tmp75 = tl.load(in_ptr0 + (200 + x1 + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tmp75 + tmp6 tmp77 = tmp75 < 0 tmp78 = tl.where(tmp77, tmp76, tmp75) tl.device_assert(((0 <= tl.broadcast_to(tmp78, [XBLOCK])) & (tl.broadcast_to(tmp78, [XBLOCK]) < 4)) | ~(tmp62 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp78, [XBLOCK]) < 4") tmp80 = tl.load(in_ptr1 + (768 + tmp68 + (4*tmp78) + (16*tmp73) + (64*x0)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp81 = tl.where(tmp45, tmp61, tmp80) tmp82 = tl.where(tmp25, tmp41, tmp81) tmp83 = tl.where(tmp4, tmp21, tmp82) tl.store(out_ptr0 + (x5), tmp83, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [add, mul, sub, round_1, temp_cloud, setitem], Original ATen: [aten.add, aten.mul, aten.sub, aten.round, aten._to_copy, aten.lift_fresh, aten.index_put] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf0, arg1_1, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data.dataset


class GetSegPred(torch.nn.Module):

    def __init__(self, scale):
        super(GetSegPred, self).__init__()
        self.scale = scale // 2

    def forward(self, segs, ptcloud):
        temp_cloud = torch.round((ptcloud + 1) * self.scale - 0.501).long()
        temp_cloud[temp_cloud == -1] = 0
        segsT = torch.transpose(segs, 1, 4)
        preds = []
        for i, p in enumerate(temp_cloud):
            pred = segsT[i, p[:, 0], p[:, 1], p[:, 2]].unsqueeze(dim=0)
            preds.append(pred)
        return torch.cat(preds, dim=0).contiguous()


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data.dataset assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 * tmp3 tmp5 = 0.501 tmp6 = tmp4 - tmp5 tmp7 = libdevice.nearbyint(tmp6) tmp8 = tmp7.to(tl.int64) tmp9 = tl.full([1], -1, tl.int64) tmp10 = tmp8 == tmp9 tmp11 = tl.full([1], 0, tl.int64) tmp12 = tl.where(tmp10, tmp11, tmp8) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x0 = xindex % 4 x5 = xindex tmp0 = x3 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 4, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tl.device_assert((0 <= tl.broadcast_to(tmp9, [XBLOCK])) & (tl. broadcast_to(tmp9, [XBLOCK]) < 4) | ~(tmp4 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp9, [XBLOCK]) < 4') tmp11 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp6 tmp13 = tmp11 < 0 tmp14 = tl.where(tmp13, tmp12, tmp11) tl.device_assert((0 <= tl.broadcast_to(tmp14, [XBLOCK])) & (tl. broadcast_to(tmp14, [XBLOCK]) < 4) | ~(tmp4 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp14, [XBLOCK]) < 4') tmp16 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp6 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tl.device_assert((0 <= tl.broadcast_to(tmp19, [XBLOCK])) & (tl. broadcast_to(tmp19, [XBLOCK]) < 4) | ~(tmp4 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp19, [XBLOCK]) < 4') tmp21 = tl.load(in_ptr1 + (tmp9 + 4 * tmp19 + 16 * tmp14 + 64 * x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 2, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp26 = tl.load(in_ptr0 + (64 + x1 + 16 * x2), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tmp26 + tmp6 tmp28 = tmp26 < 0 tmp29 = tl.where(tmp28, tmp27, tmp26) tl.device_assert((0 <= tl.broadcast_to(tmp29, [XBLOCK])) & (tl. broadcast_to(tmp29, [XBLOCK]) < 4) | ~(tmp25 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp29, [XBLOCK]) < 4') tmp31 = tl.load(in_ptr0 + (68 + x1 + 16 * x2), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 + tmp6 tmp33 = tmp31 < 0 tmp34 = tl.where(tmp33, tmp32, tmp31) tl.device_assert((0 <= tl.broadcast_to(tmp34, [XBLOCK])) & (tl. 
broadcast_to(tmp34, [XBLOCK]) < 4) | ~(tmp25 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp34, [XBLOCK]) < 4') tmp36 = tl.load(in_ptr0 + (72 + x1 + 16 * x2), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp36 + tmp6 tmp38 = tmp36 < 0 tmp39 = tl.where(tmp38, tmp37, tmp36) tl.device_assert((0 <= tl.broadcast_to(tmp39, [XBLOCK])) & (tl. broadcast_to(tmp39, [XBLOCK]) < 4) | ~(tmp25 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp39, [XBLOCK]) < 4') tmp41 = tl.load(in_ptr1 + (256 + tmp29 + 4 * tmp39 + 16 * tmp34 + 64 * x0), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tmp0 >= tmp23 tmp43 = tl.full([1], 3, tl.int64) tmp44 = tmp0 < tmp43 tmp45 = tmp42 & tmp44 tmp46 = tl.load(in_ptr0 + (128 + x1 + 16 * x2), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp46 + tmp6 tmp48 = tmp46 < 0 tmp49 = tl.where(tmp48, tmp47, tmp46) tl.device_assert((0 <= tl.broadcast_to(tmp49, [XBLOCK])) & (tl. broadcast_to(tmp49, [XBLOCK]) < 4) | ~(tmp45 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp49, [XBLOCK]) < 4') tmp51 = tl.load(in_ptr0 + (132 + x1 + 16 * x2), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp52 = tmp51 + tmp6 tmp53 = tmp51 < 0 tmp54 = tl.where(tmp53, tmp52, tmp51) tl.device_assert((0 <= tl.broadcast_to(tmp54, [XBLOCK])) & (tl. broadcast_to(tmp54, [XBLOCK]) < 4) | ~(tmp45 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp54, [XBLOCK]) < 4') tmp56 = tl.load(in_ptr0 + (136 + x1 + 16 * x2), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tmp56 + tmp6 tmp58 = tmp56 < 0 tmp59 = tl.where(tmp58, tmp57, tmp56) tl.device_assert((0 <= tl.broadcast_to(tmp59, [XBLOCK])) & (tl. broadcast_to(tmp59, [XBLOCK]) < 4) | ~(tmp45 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp59, [XBLOCK]) < 4') tmp61 = tl.load(in_ptr1 + (512 + tmp49 + 4 * tmp59 + 16 * tmp54 + 64 * x0), tmp45 & xmask, eviction_policy='evict_last', other=0.0) tmp62 = tmp0 >= tmp43 tl.full([1], 4, tl.int64) tmp65 = tl.load(in_ptr0 + (192 + x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp66 = tmp65 + tmp6 tmp67 = tmp65 < 0 tmp68 = tl.where(tmp67, tmp66, tmp65) tl.device_assert((0 <= tl.broadcast_to(tmp68, [XBLOCK])) & (tl. broadcast_to(tmp68, [XBLOCK]) < 4) | ~(tmp62 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp68, [XBLOCK]) < 4') tmp70 = tl.load(in_ptr0 + (196 + x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp71 = tmp70 + tmp6 tmp72 = tmp70 < 0 tmp73 = tl.where(tmp72, tmp71, tmp70) tl.device_assert((0 <= tl.broadcast_to(tmp73, [XBLOCK])) & (tl. broadcast_to(tmp73, [XBLOCK]) < 4) | ~(tmp62 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp73, [XBLOCK]) < 4') tmp75 = tl.load(in_ptr0 + (200 + x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tmp75 + tmp6 tmp77 = tmp75 < 0 tmp78 = tl.where(tmp77, tmp76, tmp75) tl.device_assert((0 <= tl.broadcast_to(tmp78, [XBLOCK])) & (tl. 
broadcast_to(tmp78, [XBLOCK]) < 4) | ~(tmp62 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp78, [XBLOCK]) < 4') tmp80 = tl.load(in_ptr1 + (768 + tmp68 + 4 * tmp78 + 16 * tmp73 + 64 * x0), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp81 = tl.where(tmp45, tmp61, tmp80) tmp82 = tl.where(tmp25, tmp41, tmp81) tmp83 = tl.where(tmp4, tmp21, tmp82) tl.store(out_ptr0 + x5, tmp83, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_add_index_put_lift_fresh_mul_round_sub_0[grid (256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(256)](buf0, arg1_1, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 del buf0 return buf1, class GetSegPredNew(torch.nn.Module): def __init__(self, scale): super(GetSegPredNew, self).__init__() self.scale = scale // 2 def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
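One quirk worth flagging in this record: get_init_inputs passes scale=1.0, so self.scale = 1.0 // 2 == 0.0, and the compiled kernel folds the multiply into a literal 0.0 (tmp3/tmp4 above). Every index then rounds to round(-0.501) == -1 before the setitem remaps it to 0, so with these example inputs the gather always reads voxel (0, 0, 0). A short check of that arithmetic:

import torch

ptcloud = torch.rand(4, 4, 4, 4)
scale = 1.0 // 2                                          # == 0.0 (floor division on floats)
idx = torch.round((ptcloud + 1) * scale - 0.501).long()
assert (idx == -1).all()                                  # before temp_cloud[temp_cloud == -1] = 0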
melisataspinar/Concurrent-Completion-and-Part-Segmentation-for-3D-Missing-Point-Clouds-viaSynergistic-Feature-Mappi
GetSegPred
false
10,461
[ "MIT" ]
0
3b03f3c167d9927a660d798ffcd8ecc0f5cbaf89
https://github.com/melisataspinar/Concurrent-Completion-and-Part-Segmentation-for-3D-Missing-Point-Clouds-viaSynergistic-Feature-Mappi/tree/3b03f3c167d9927a660d798ffcd8ecc0f5cbaf89
StyleMod
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yg/cygzi6evso6kefobgrwgjcxh5qgbn7zouyf7to742xeh6bsflplb.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2n/c2npkiljqvyo5boces3kqpxvha7qfatrulgz7qm2oitxblzqbabd.py # 
Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # add => add # mul_2 => mul_2 # x => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, 1.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %select_1), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 4 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1 + (8*x2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x1 + (8*x2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = tmp5 + tmp3 tmp7 = tmp0 * tmp6 tmp10 = tmp9 * tmp3 tmp11 = tmp8 + tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, ), (1, )) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), 
torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_1.run(primals_4, buf1, primals_1, buf2, 4096, grid=grid(4096), stream=stream0) del buf1 del primals_1 return (buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class StyleMod(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleMod, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale =use_wscale) def forward(self, x, latent): style = self.lin(latent) shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1] style = style.view(shape) x = x * (style[:, 0] + 1.0) + style[:, 1] return x def get_inputs(): return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}]
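The StyleMod.forward above is the core of StyleGAN-style modulation: the linear layer emits 2*channels values per latent, which are split into a per-channel scale (style[:, 0] + 1.0) and shift (style[:, 1]) broadcast over the spatial dimensions of x. A minimal shape-check sketch, assuming the classes above are in scope (an editorial illustration, not part of the dataset entry):

import torch

# Hypothetical sanity check of the modulation shapes described above.
mod = StyleMod(latent_size=4, channels=4, use_wscale=1.0)
x = torch.rand(64, 4, 4, 4)       # (batch, channels, H, W)
latent = torch.rand(4, 4, 4, 4)   # last dim is latent_size=4; lin maps it to 2*channels
y = mod(x, latent)
assert y.shape == x.shape         # modulation is elementwise over x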
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = tmp5 + tmp3 tmp7 = tmp0 * tmp6 tmp10 = tmp9 * tmp3 tmp11 = tmp8 + tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + x3, tmp12, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8,), (1,)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_1 return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class StyleModNew(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleModNew, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale =use_wscale) def forward(self, input_0, input_1): 
primals_2 = self.lin.weight primals_1 = self.lin.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
justinpinkney/ganspace
StyleMod
false
10462
[ "Apache-2.0" ]
0
7dc76d1d2ddad21d946a7ceb375efe5d5316fb3f
https://github.com/justinpinkney/ganspace/tree/7dc76d1d2ddad21d946a7ceb375efe5d5316fb3f
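For context, each row of this dataset pairs an eager module (StyleMod) with its Inductor-compiled counterpart (StyleModNew). A minimal cross-check sketch, assuming both classes above are in scope and a CUDA device is available (the compiled call() targets cuda:0); the tolerances are an assumption, not part of the entry:

import torch

# Shapes follow get_inputs() above; the compiled graph asserts these strides.
x = torch.rand(64, 4, 4, 4, device='cuda')
latent = torch.rand(4, 4, 4, 4, device='cuda')

eager = StyleMod(latent_size=4, channels=4, use_wscale=1.0).cuda()
compiled = StyleModNew(latent_size=4, channels=4, use_wscale=1.0).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights for a fair comparison

torch.testing.assert_close(eager(x, latent), compiled(x, latent), rtol=1e-4, atol=1e-4)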
MultiHeadedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %sum_dim_int_list : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = 
tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = 
async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 
0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_8, buf8, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from typing import Optional import torch.nn.functional as F from torch import nn def attention(query, key, value, mask=None, dropout=None): """Compute 'Scaled Dot Product Attention' """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) weights = F.softmax(scores, dim=-1) if dropout is not None: weights = dropout(weights) out = torch.matmul(weights, value) return out, weights class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, dropout=0.1): """Take in model size and number of heads.""" super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 self.d_head = d_model // h self.h = h self.proj_q = nn.Linear(d_model, d_model) self.proj_k = nn.Linear(d_model, d_model) self.proj_v = nn.Linear(d_model, d_model) self.ret_proj = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None): """ query: (batch_size, seq_len, dmodel) key: (batch_size, seq_len, dmodel) value: (batch_size, seq_len, dmodel) mask: (batch_size, seq_len) """ if mask is not None: mask = mask.unsqueeze(1) batch_size = query.size(0) seq_len = query.size(1) query = self.proj_q(query).view(batch_size, seq_len, self.h, self. d_head).transpose(1, 2) key = self.proj_k(key).view(batch_size, seq_len, self.h, self.d_head ).transpose(1, 2) value = self.proj_v(value).view(batch_size, seq_len, self.h, self. d_head).transpose(1, 2) x, _ = attention(query, key, value, mask=mask, dropout=self.dropout) x = x.transpose(1, 2).contiguous().view(batch_size, seq_len, self.h * self.d_head) return self.ret_proj(x) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'h': 4, 'd_model': 4}]
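The attention() helper above is standard scaled dot-product attention. A quick sanity sketch, assuming the helper is in scope (an editorial illustration, not part of the dataset entry): with no mask and no dropout, the weights are a row-wise softmax of Q @ K^T / sqrt(d_k), so each attention row sums to 1.

import torch

q = k = v = torch.rand(2, 4, 8)   # (batch, seq_len, d_k)
out, weights = attention(q, k, v)
# Output keeps the value shape; weights form one distribution per query row.
assert out.shape == (2, 4, 8) and weights.shape == (2, 4, 4)
assert torch.allclose(weights.sum(-1), torch.ones(2, 4))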
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 
+ x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = 
reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10 def attention(query, key, value, mask=None, dropout=None): """Compute 'Scaled Dot Product Attention' """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) weights = F.softmax(scores, dim=-1) if dropout is not None: weights = dropout(weights) out = torch.matmul(weights, value) return out, weights class MultiHeadedAttentionNew(nn.Module): def __init__(self, h, d_model, dropout=0.1): """Take in model size and number of heads.""" super(MultiHeadedAttentionNew, self).__init__() assert d_model % h == 0 self.d_head = d_model // h self.h = h self.proj_q = nn.Linear(d_model, d_model) self.proj_k = nn.Linear(d_model, d_model) self.proj_v = nn.Linear(d_model, d_model) self.ret_proj = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0, input_1, input_2): primals_2 = self.proj_q.weight primals_3 = self.proj_q.bias primals_4 = self.proj_k.weight primals_5 = self.proj_k.bias primals_7 = self.proj_v.weight primals_8 = self.proj_v.bias primals_10 = self.ret_proj.weight primals_11 = self.ret_proj.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
malhotraa/transformer-experiments
MultiHeadedAttention
false
10463

[ "MIT" ]
0
82931b89b14d26dbd6e4ffef8d6f2fd8b7279c0f
https://github.com/malhotraa/transformer-experiments/tree/82931b89b14d26dbd6e4ffef8d6f2fd8b7279c0f
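As with the StyleMod entry, the compiled MultiHeadedAttentionNew can be cross-checked against the eager module. A hedged sketch, assuming both classes above are in scope and a CUDA device is available; eval() is used so dropout is inactive and the two passes are deterministic, and the tolerances are an assumption:

import torch

# Shapes follow get_inputs() above.
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')

eager = MultiHeadedAttention(h=4, d_model=4).cuda().eval()
compiled = MultiHeadedAttentionNew(h=4, d_model=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # share weights before comparing

with torch.no_grad():
    torch.testing.assert_close(eager(q, k, v), compiled(q, k, v), rtol=1e-4, atol=1e-4)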
GE2ELoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/i3/ci3z4sjonlh5q5onksb7pcs7gyh4blejtoz4atayshpru5wafc22.py # Topologically Sorted Source Nodes: [new_centroids, new_centroids_3, new_centroids_4, new_centroids_7, new_centroids_8, new_centroids_11, new_centroids_12], Original ATen: [aten.stack] # Source node to ATen node mapping: # new_centroids => cat # new_centroids_11 => cat_19 # new_centroids_12 => cat_21 # new_centroids_3 => cat_5 # new_centroids_4 => cat_7 # new_centroids_7 => cat_12 # new_centroids_8 => cat_14 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_1, %select_10, %select_11, %select_12],), kwargs = {}) # %cat_5 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_4, %select_10, %select_11, %select_12],), kwargs = {}) # %cat_7 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_5, %select_11, %select_12],), kwargs = {}) # %cat_12 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_8, %select_11, %select_12],), kwargs = {}) # %cat_14 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_9, %select_12],), kwargs = {}) # %cat_19 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_12, %select_12],), kwargs = {}) # %cat_21 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_13],), kwargs = {}) triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (8 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (12 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = 3.0 tmp11 = tmp9 / tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tmp15 = tl.full([1], 8, tl.int64) tmp16 = tmp0 < tmp15 tmp17 = tmp14 & tmp16 tmp18 = tl.load(in_ptr0 + (16 + ((-4) + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.load(in_ptr0 + (20 + ((-4) + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp18 + tmp19 tmp21 = tl.load(in_ptr0 + (24 + ((-4) + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr0 + (28 + ((-4) + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 + tmp23 tmp25 = 4.0 tmp26 = tmp24 / tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp17, tmp26, tmp27) tmp29 = tmp0 >= tmp15 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr0 + (32 + ((-8) + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tl.load(in_ptr0 + (36 + ((-8) + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tmp33 + tmp34 tmp36 = tl.load(in_ptr0 + (40 + ((-8) + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tl.load(in_ptr0 + (44 + ((-8) + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tmp37 + tmp38 tmp40 = tmp39 / tmp25 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp32, tmp40, tmp41) tmp43 = tmp0 >= tmp30 tmp44 = tl.full([1], 16, tl.int64) tmp45 = tmp0 < tmp44 tmp46 = tl.load(in_ptr0 + (48 + ((-12) + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr0 + (52 + ((-12) + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp46 + tmp47 tmp49 = tl.load(in_ptr0 + (56 + ((-12) + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tmp48 + tmp49 tmp51 = tl.load(in_ptr0 + (60 + ((-12) + x0)), tmp43 & xmask, eviction_policy='evict_last', 
other=0.0) tmp52 = tmp50 + tmp51 tmp53 = tmp52 / tmp25 tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype) tmp55 = tl.where(tmp43, tmp53, tmp54) tmp56 = tl.where(tmp32, tmp42, tmp55) tmp57 = tl.where(tmp17, tmp28, tmp56) tmp58 = tl.where(tmp4, tmp13, tmp57) tmp59 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tmp59 + tmp5 tmp61 = tmp60 + tmp6 tmp62 = tmp61 / tmp10 tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype) tmp64 = tl.where(tmp4, tmp62, tmp63) tmp65 = tl.where(tmp4, tmp64, tmp57) tmp66 = tmp61 + tmp8 tmp67 = tmp66 / tmp25 tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype) tmp69 = tl.where(tmp4, tmp67, tmp68) tmp70 = tmp19 + tmp21 tmp71 = tmp70 + tmp23 tmp72 = tmp71 / tmp10 tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype) tmp74 = tl.where(tmp17, tmp72, tmp73) tmp75 = tl.where(tmp17, tmp74, tmp56) tmp76 = tl.where(tmp4, tmp69, tmp75) tmp77 = tmp22 / tmp10 tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype) tmp79 = tl.where(tmp17, tmp77, tmp78) tmp80 = tl.where(tmp17, tmp79, tmp56) tmp81 = tl.where(tmp4, tmp69, tmp80) tmp82 = tmp34 + tmp36 tmp83 = tmp82 + tmp38 tmp84 = tmp83 / tmp10 tmp85 = tl.full(tmp84.shape, 0.0, tmp84.dtype) tmp86 = tl.where(tmp32, tmp84, tmp85) tmp87 = tl.where(tmp32, tmp86, tmp55) tmp88 = tl.where(tmp17, tmp28, tmp87) tmp89 = tl.where(tmp4, tmp69, tmp88) tmp90 = tmp37 / tmp10 tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype) tmp92 = tl.where(tmp32, tmp90, tmp91) tmp93 = tl.where(tmp32, tmp92, tmp55) tmp94 = tl.where(tmp17, tmp28, tmp93) tmp95 = tl.where(tmp4, tmp69, tmp94) tmp96 = tmp47 + tmp49 tmp97 = tmp96 + tmp51 tmp98 = tmp97 / tmp10 tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype) tmp100 = tl.where(tmp43, tmp98, tmp99) tmp101 = tl.where(tmp32, tmp42, tmp100) tmp102 = tl.where(tmp17, tmp28, tmp101) tmp103 = tl.where(tmp4, tmp69, tmp102) tl.store(out_ptr0 + (x0), tmp58, xmask) tl.store(out_ptr1 + (x0), tmp65, xmask) tl.store(out_ptr2 + (x0), tmp76, xmask) tl.store(out_ptr3 + (x0), tmp81, xmask) tl.store(out_ptr4 + (x0), tmp89, xmask) tl.store(out_ptr5 + (x0), tmp95, xmask) tl.store(out_ptr6 + (x0), tmp103, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ep/cepvlwnn5h76ozdsbquyuiv4b32445eiuhfjkyovopyyh7r6lqdf.py # Topologically Sorted Source Nodes: [norm, excl_2, excl_3], Original ATen: [aten.linalg_vector_norm, aten.cat, aten.mean] # Source node to ATen node mapping: # excl_2 => cat_1 # excl_3 => mean_2 # norm => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_4, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_3, %slice_4],), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_1, [0]), kwargs = {}) triton_per_fused_cat_linalg_vector_norm_mean_1 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': 
{0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = tl.full([1, 1], 0, tl.int64) tmp6 = tmp5 >= tmp5 tmp7 = tl.full([1, 1], 1, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tl.load(in_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, other=0.0) tmp10 = tmp5 >= tmp7 tmp11 = tl.full([1, 1], 3, tl.int64) tmp12 = tmp5 < tmp11 tmp13 = tl.load(in_ptr0 + (tl.broadcast_to(8 + r0 + (4*(-1)), [XBLOCK, RBLOCK])), tmp10, other=0.0) tmp14 = tl.where(tmp8, tmp9, tmp13) tmp15 = tmp7 >= tmp5 tmp16 = tmp7 < tmp7 tmp17 = tl.load(in_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp16, other=0.0) tmp18 = tmp7 >= tmp7 tmp19 = tmp7 < tmp11 tmp20 = tl.load(in_ptr0 + (tl.broadcast_to(8 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp18, other=0.0) tmp21 = tl.where(tmp16, tmp17, tmp20) tmp22 = tmp14 + tmp21 tmp23 = tl.full([1, 1], 2, tl.int64) tmp24 = tmp23 >= tmp5 tmp25 = tmp23 < tmp7 tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp25, other=0.0) tmp27 = tmp23 >= tmp7 tmp28 = tmp23 < tmp11 tmp29 = tl.load(in_ptr0 + (tl.broadcast_to(8 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp27, other=0.0) tmp30 = tl.where(tmp25, tmp26, tmp29) tmp31 = tmp22 + tmp30 tmp32 = 3.0 tmp33 = tmp31 / tmp32 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp33, None) tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/mf/cmfeaa5x7m36qp3iyiau26fjfz3dxqznxvc4k6n4bc2o7xru5x2l.py # Topologically Sorted Source Nodes: [new_centroids_1, new_centroids_2, norm_8, excl_10, excl_11], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean] # Source node to ATen node mapping: # excl_10 => cat_8 # excl_11 => mean_6 # new_centroids_1 => cat_2 # new_centroids_2 => cat_4 # norm_8 => pow_17, sum_9 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_2, %select_10, %select_11, %select_12],), kwargs = {}) # %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_3, %select_10, %select_11, %select_12],), kwargs = {}) # 
# kernel path: runs/run_shard_8/inductor_cache/mf/cmfeaa5x7m36qp3iyiau26fjfz3dxqznxvc4k6n4bc2o7xru5x2l.py
# Topologically Sorted Source Nodes: [new_centroids_1, new_centroids_2, norm_8, excl_10, excl_11], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean]
# Source node to ATen node mapping:
#   excl_10 => cat_8
#   excl_11 => mean_6
#   new_centroids_1 => cat_2
#   new_centroids_2 => cat_4
#   norm_8 => pow_17, sum_9
# Graph fragment:
#   %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_2, %select_10, %select_11, %select_12],), kwargs = {})
#   %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_3, %select_10, %select_11, %select_12],), kwargs = {})
#   %pow_17 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_28, 2), kwargs = {})
#   %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_17, None), kwargs = {})
#   %cat_8 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_11, %slice_12],), kwargs = {})
#   %mean_6 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_8, [0]), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_stack_2 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_stack_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(5,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_stack_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (16 + r0), None)
    tmp5 = tl.load(in_ptr0 + (20 + r0), None)
    tmp7 = tl.load(in_ptr0 + (24 + r0), None)
    tmp9 = tl.load(in_ptr0 + (28 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp0 + tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = tl.full([1, 1], 0, tl.int64)
    tmp14 = tmp13 >= tmp13
    tmp15 = tl.full([1, 1], 1, tl.int64)
    tmp16 = tmp13 < tmp15
    tmp17 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0, [XBLOCK, RBLOCK])), tmp16, other=0.0)
    tmp18 = tmp13 >= tmp15
    tmp19 = tl.full([1, 1], 3, tl.int64)
    tmp20 = tmp13 < tmp19
    tmp21 = tl.load(in_ptr0 + (tl.broadcast_to(24 + r0 + (4*(-1)), [XBLOCK, RBLOCK])), tmp18, other=0.0)
    tmp22 = tl.where(tmp16, tmp17, tmp21)
    tmp23 = tmp15 >= tmp13
    tmp24 = tmp15 < tmp15
    tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0, [XBLOCK, RBLOCK])), tmp24, other=0.0)
    tmp26 = tmp15 >= tmp15
    tmp27 = tmp15 < tmp19
    tmp28 = tl.load(in_ptr0 + (tl.broadcast_to(24 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp26, other=0.0)
    tmp29 = tl.where(tmp24, tmp25, tmp28)
    tmp30 = tmp22 + tmp29
    tmp31 = tl.full([1, 1], 2, tl.int64)
    tmp32 = tmp31 >= tmp13
    tmp33 = tmp31 < tmp15
    tmp34 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0, [XBLOCK, RBLOCK])), tmp33, other=0.0)
    tmp35 = tmp31 >= tmp15
    tmp36 = tmp31 < tmp19
    tmp37 = tl.load(in_ptr0 + (tl.broadcast_to(24 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp35, other=0.0)
    tmp38 = tl.where(tmp33, tmp34, tmp37)
    tmp39 = tmp30 + tmp38
    tmp40 = 3.0
    tmp41 = tmp39 / tmp40
    tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp41, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')
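
# [editor's note, illustrative] This kernel repeats the pattern above for the
# second (4, 4) block of a (4, 4, 4) input, additionally producing the full
# 4-row mean (the running centroid) alongside the leave-one-out mean. The two
# kernels that follow (..._stack_3 and ..._stack_4) are the same computation
# at block offsets 32 and 48. A hedged eager-mode sketch (hypothetical helper):
def _sketch_centroids_block(x, b):
    # x: (4, 4, 4); b: block index (this kernel corresponds to b == 1)
    sq_norm = torch.sum(x[b, 0] ** 2)                      # ||first row||^2
    full_mean = x[b].mean(0)                               # centroid over all 4 rows
    excl_mean = torch.cat([x[b, 0:1], x[b, 2:4]]).mean(0)  # leave-row-1-out centroid
    return sq_norm, full_mean, excl_mean
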
# kernel path: runs/run_shard_8/inductor_cache/4j/c4jrilvt5mqtiyzl6ukdzjjkxjtxwj4vcmqdi3frlq5doee6agn3.py
# Topologically Sorted Source Nodes: [new_centroids_1, new_centroids_2, new_centroids_5, new_centroids_6, norm_16, excl_18, excl_19, norm_18], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean]
# Source node to ATen node mapping:
#   excl_18 => cat_15
#   excl_19 => mean_10
#   new_centroids_1 => cat_2
#   new_centroids_2 => cat_4
#   new_centroids_5 => cat_9
#   new_centroids_6 => cat_11
#   norm_16 => pow_33, sum_17
#   norm_18 => pow_37, sum_19
# Graph fragment:
#   %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_2, %select_10, %select_11, %select_12],), kwargs = {})
#   %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_3, %select_10, %select_11, %select_12],), kwargs = {})
#   %cat_9 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_6, %select_11, %select_12],), kwargs = {})
#   %cat_11 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_7, %select_11, %select_12],), kwargs = {})
#   %pow_33 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_52, 2), kwargs = {})
#   %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_33, None), kwargs = {})
#   %cat_15 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_19, %slice_20],), kwargs = {})
#   %mean_10 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_15, [0]), kwargs = {})
#   %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_53, 2), kwargs = {})
#   %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, None), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_stack_3 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_stack_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 7), equal_to_1=(8,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_stack_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (32 + r0), None)
    tmp5 = tl.load(in_ptr0 + (36 + r0), None)
    tmp7 = tl.load(in_ptr0 + (40 + r0), None)
    tmp9 = tl.load(in_ptr0 + (44 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp0 + tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = tl.full([1, 1], 0, tl.int64)
    tmp14 = tmp13 >= tmp13
    tmp15 = tl.full([1, 1], 1, tl.int64)
    tmp16 = tmp13 < tmp15
    tmp17 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0, [XBLOCK, RBLOCK])), tmp16, other=0.0)
    tmp18 = tmp13 >= tmp15
    tmp19 = tl.full([1, 1], 3, tl.int64)
    tmp20 = tmp13 < tmp19
    tmp21 = tl.load(in_ptr0 + (tl.broadcast_to(40 + r0 + (4*(-1)), [XBLOCK, RBLOCK])), tmp18, other=0.0)
    tmp22 = tl.where(tmp16, tmp17, tmp21)
    tmp23 = tmp15 >= tmp13
    tmp24 = tmp15 < tmp15
    tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0, [XBLOCK, RBLOCK])), tmp24, other=0.0)
    tmp26 = tmp15 >= tmp15
    tmp27 = tmp15 < tmp19
    tmp28 = tl.load(in_ptr0 + (tl.broadcast_to(40 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp26, other=0.0)
    tmp29 = tl.where(tmp24, tmp25, tmp28)
    tmp30 = tmp22 + tmp29
    tmp31 = tl.full([1, 1], 2, tl.int64)
    tmp32 = tmp31 >= tmp13
    tmp33 = tmp31 < tmp15
    tmp34 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0, [XBLOCK, RBLOCK])), tmp33, other=0.0)
    tmp35 = tmp31 >= tmp15
    tmp36 = tmp31 < tmp19
    tmp37 = tl.load(in_ptr0 + (tl.broadcast_to(40 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp35, other=0.0)
    tmp38 = tl.where(tmp33, tmp34, tmp37)
    tmp39 = tmp30 + tmp38
    tmp40 = 3.0
    tmp41 = tmp39 / tmp40
    tmp42 = tmp5 * tmp5
    tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
    tmp45 = tl.sum(tmp43, 1)[:, None]
    tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp41, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
    tl.store(out_ptr6 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp45, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/jy/cjyty4bxscbvyfultdbj4vrypnzsswd6efeo2rhnwgxg6mg72y7r.py
# Topologically Sorted Source Nodes: [new_centroids_1, new_centroids_2, new_centroids_5, new_centroids_6, new_centroids_9, new_centroids_10, norm_24, excl_26, excl_27], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean]
# Source node to ATen node mapping:
#   excl_26 => cat_22
#   excl_27 => mean_14
#   new_centroids_1 => cat_2
#   new_centroids_10 => cat_18
#   new_centroids_2 => cat_4
#   new_centroids_5 => cat_9
#   new_centroids_6 => cat_11
#   new_centroids_9 => cat_16
#   norm_24 => pow_49, sum_25
# Graph fragment:
#   %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_2, %select_10, %select_11, %select_12],), kwargs = {})
#   %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean_3, %select_10, %select_11, %select_12],), kwargs = {})
#   %cat_9 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_6, %select_11, %select_12],), kwargs = {})
#   %cat_11 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_7, %select_11, %select_12],), kwargs = {})
#   %cat_16 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_10, %select_12],), kwargs = {})
#   %cat_18 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_11, %select_12],), kwargs = {})
#   %pow_49 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_76, 2), kwargs = {})
#   %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_49, None), kwargs = {})
#   %cat_22 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_27, %slice_28],), kwargs = {})
#   %mean_14 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_22, [0]), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_stack_4 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_stack_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {9: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(9,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_stack_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (48 + r0), None)
    tmp5 = tl.load(in_ptr0 + (52 + r0), None)
    tmp7 = tl.load(in_ptr0 + (56 + r0), None)
    tmp9 = tl.load(in_ptr0 + (60 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp0 + tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = tl.full([1, 1], 0, tl.int64)
    tmp14 = tmp13 >= tmp13
    tmp15 = tl.full([1, 1], 1, tl.int64)
    tmp16 = tmp13 < tmp15
    tmp17 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0, [XBLOCK, RBLOCK])), tmp16, other=0.0)
    tmp18 = tmp13 >= tmp15
    tmp19 = tl.full([1, 1], 3, tl.int64)
    tmp20 = tmp13 < tmp19
    tmp21 = tl.load(in_ptr0 + (tl.broadcast_to(56 + r0 + (4*(-1)), [XBLOCK, RBLOCK])), tmp18, other=0.0)
    tmp22 = tl.where(tmp16, tmp17, tmp21)
    tmp23 = tmp15 >= tmp13
    tmp24 = tmp15 < tmp15
    tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0, [XBLOCK, RBLOCK])), tmp24, other=0.0)
    tmp26 = tmp15 >= tmp15
    tmp27 = tmp15 < tmp19
    tmp28 = tl.load(in_ptr0 + (tl.broadcast_to(56 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp26, other=0.0)
    tmp29 = tl.where(tmp24, tmp25, tmp28)
    tmp30 = tmp22 + tmp29
    tmp31 = tl.full([1, 1], 2, tl.int64)
    tmp32 = tmp31 >= tmp13
    tmp33 = tmp31 < tmp15
    tmp34 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0, [XBLOCK, RBLOCK])), tmp33, other=0.0)
    tmp35 = tmp31 >= tmp15
    tmp36 = tmp31 < tmp19
    tmp37 = tl.load(in_ptr0 + (tl.broadcast_to(56 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp35, other=0.0)
    tmp38 = tl.where(tmp33, tmp34, tmp37)
    tmp39 = tmp30 + tmp38
    tmp40 = 3.0
    tmp41 = tmp39 / tmp40
    tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
    tl.store(out_ptr7 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp41, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/wg/cwgqgf5md4z72c43htycwc3ktpi7aaquyuyylibbxopxvlx3xjpw.py
# Topologically Sorted Source Nodes: [norm_2], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
#   norm_2 => pow_5, sum_3
# Graph fragment:
#   %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_5, 2), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, None), kwargs = {})
triton_per_fused_linalg_vector_norm_5 = async_compile.triton('triton_per_fused_linalg_vector_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_5(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (4 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')
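
# [editor's note, illustrative] A family of tiny reduction kernels (this one,
# ..._norm_10 and ..._norm_12 further below) each reduce a single 4-element row
# to its squared L2 norm at a different fixed offset. A hedged eager-mode
# equivalent, with the input viewed as (16, 4) rows; helper name hypothetical:
def _sketch_row_sq_norm(x, row):
    # this kernel corresponds to row == 1 (elements 4..7 of the flat buffer)
    return torch.sum(x.reshape(16, 4)[row] ** 2)
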
# kernel path: runs/run_shard_8/inductor_cache/fn/cfnvzcqi44e6h2vvavig5ohe2okvf7vbtlnytu63pi7tyguwayv6.py
# Topologically Sorted Source Nodes: [excl_4, excl_5, norm_4, norm_6, new_centroids_5, new_centroids_6, new_centroids_9, new_centroids_10, new_centroids_13, new_centroids_14], Original ATen: [aten.cat, aten.mean, aten.linalg_vector_norm, aten.stack]
# Source node to ATen node mapping:
#   excl_4 => cat_3
#   excl_5 => mean_3
#   new_centroids_10 => cat_18
#   new_centroids_13 => cat_23
#   new_centroids_14 => cat_25
#   new_centroids_5 => cat_9
#   new_centroids_6 => cat_11
#   new_centroids_9 => cat_16
#   norm_4 => pow_9, sum_5
#   norm_6 => pow_13, sum_7
# Graph fragment:
#   %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_6],), kwargs = {})
#   %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_3, [0]), kwargs = {})
#   %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_6, 2), kwargs = {})
#   %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_9, None), kwargs = {})
#   %pow_13 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_7, 2), kwargs = {})
#   %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_13, None), kwargs = {})
#   %cat_9 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_6, %select_11, %select_12],), kwargs = {})
#   %cat_11 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %mean_7, %select_11, %select_12],), kwargs = {})
#   %cat_16 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_10, %select_12],), kwargs = {})
#   %cat_18 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_11, %select_12],), kwargs = {})
#   %cat_23 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_14],), kwargs = {})
#   %cat_25 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_15],), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_stack_6 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_stack_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {10: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=(10,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_stack_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (8 + r0), None)
    tmp5 = tl.load(in_ptr0 + (12 + r0), None)
    tmp39 = tl.load(in_ptr0 + (r0), None)
    tmp40 = tl.load(in_ptr0 + (4 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp10 = tl.full([1, 1], 0, tl.int64)
    tmp11 = tmp10 >= tmp10
    tmp12 = tl.full([1, 1], 2, tl.int64)
    tmp13 = tmp10 < tmp12
    tmp14 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*0), [XBLOCK, RBLOCK])), tmp13, other=0.0)
    tmp15 = tmp10 >= tmp12
    tmp16 = tl.full([1, 1], 3, tl.int64)
    tmp17 = tmp10 < tmp16
    tmp18 = tl.load(in_ptr0 + (tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK])), tmp15, other=0.0)
    tmp19 = tl.where(tmp13, tmp14, tmp18)
    tmp20 = tl.full([1, 1], 1, tl.int64)
    tmp21 = tmp20 >= tmp10
    tmp22 = tmp20 < tmp12
    tmp23 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*1), [XBLOCK, RBLOCK])), tmp22, other=0.0)
    tmp24 = tmp20 >= tmp12
    tmp25 = tmp20 < tmp16
    tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK])), tmp24, other=0.0)
    tmp27 = tl.where(tmp22, tmp23, tmp26)
    tmp28 = tmp19 + tmp27
    tmp29 = tmp12 >= tmp10
    tmp30 = tmp12 < tmp12
    tmp31 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*2), [XBLOCK, RBLOCK])), tmp30, other=0.0)
    tmp32 = tmp12 >= tmp12
    tmp33 = tmp12 < tmp16
    tmp34 = tl.load(in_ptr0 + (tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK])), tmp32, other=0.0)
    tmp35 = tl.where(tmp30, tmp31, tmp34)
    tmp36 = tmp28 + tmp35
    tmp37 = 3.0
    tmp38 = tmp36 / tmp37
    tmp41 = tmp39 + tmp40
    tmp42 = tmp41 + tmp0
    tmp43 = tmp42 + tmp5
    tmp44 = 4.0
    tmp45 = tmp43 / tmp44
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp38, None)
    tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr7 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr8 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
    tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
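
# [editor's note, illustrative] This kernel (and ..._stack_8 / ..._stack_9
# below, which shift the block offset) fuses two row norms, a leave-one-out
# mean that skips row 2, and the full 4-row centroid, with the centroid
# broadcast to several output buffers. A hedged eager-mode sketch of one block;
# helper name hypothetical:
def _sketch_norms_and_means(xb):
    # xb: one (4, 4) block
    n2, n3 = torch.sum(xb[2] ** 2), torch.sum(xb[3] ** 2)   # out_ptr0, out_ptr1
    excl_mean = torch.cat([xb[0:2], xb[3:4]]).mean(0)       # rows 0, 1, 3
    full_mean = xb.mean(0)                                  # duplicated across outputs
    return n2, n3, excl_mean, full_mean
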
# kernel path: runs/run_shard_8/inductor_cache/uv/cuv3j34526wda3guhmc53tumbwtq6kgrjwarktyyx43qheojwlvy.py
# Topologically Sorted Source Nodes: [cs_row], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cs_row => cat_6
# Graph fragment:
#   %cat_6 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%clamp_min, %clamp_min_1, %clamp_min_2, %clamp_min_3],), kwargs = {})
triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp6 = tl.load(in_ptr1 + (0))
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp32 = tl.load(in_ptr4 + (0))
    tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
    tmp57 = tl.load(in_ptr7 + (0))
    tmp58 = tl.broadcast_to(tmp57, [XBLOCK])
    tmp81 = tl.load(in_ptr10 + (0))
    tmp82 = tl.broadcast_to(tmp81, [XBLOCK])
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = tl.load(in_ptr2 + (4*x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tmp9 * tmp9
    tmp11 = tl.load(in_ptr2 + (1 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tl.load(in_ptr2 + (2 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tl.load(in_ptr2 + (3 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = libdevice.sqrt(tmp19)
    tmp21 = tmp8 * tmp20
    tmp22 = tmp5 / tmp21
    tmp23 = 1e-06
    tmp24 = triton_helpers.maximum(tmp22, tmp23)
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp4, tmp24, tmp25)
    tmp27 = tmp0 >= tmp3
    tmp28 = tl.full([1], 2, tl.int64)
    tmp29 = tmp0 < tmp28
    tmp30 = tmp27 & tmp29
    tmp31 = tl.load(in_ptr3 + (x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
    tmp34 = libdevice.sqrt(tmp33)
    tmp35 = tl.load(in_ptr5 + (4*x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
    tmp36 = tmp35 * tmp35
    tmp37 = tl.load(in_ptr5 + (1 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tl.load(in_ptr5 + (2 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
    tmp41 = tmp40 * tmp40
    tmp42 = tmp39 + tmp41
    tmp43 = tl.load(in_ptr5 + (3 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
    tmp44 = tmp43 * tmp43
    tmp45 = tmp42 + tmp44
    tmp46 = libdevice.sqrt(tmp45)
    tmp47 = tmp34 * tmp46
    tmp48 = tmp31 / tmp47
    tmp49 = triton_helpers.maximum(tmp48, tmp23)
    tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
    tmp51 = tl.where(tmp30, tmp49, tmp50)
    tmp52 = tmp0 >= tmp28
    tmp53 = tl.full([1], 3, tl.int64)
    tmp54 = tmp0 < tmp53
    tmp55 = tmp52 & tmp54
    tmp56 = tl.load(in_ptr6 + (x0), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp59 = libdevice.sqrt(tmp58)
    tmp60 = tl.load(in_ptr8 + (4*x0), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp61 = tmp60 * tmp60
    tmp62 = tl.load(in_ptr8 + (1 + (4*x0)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp63 = tmp62 * tmp62
    tmp64 = tmp61 + tmp63
    tmp65 = tl.load(in_ptr8 + (2 + (4*x0)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp66 = tmp65 * tmp65
    tmp67 = tmp64 + tmp66
    tmp68 = tl.load(in_ptr8 + (3 + (4*x0)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp69 = tmp68 * tmp68
    tmp70 = tmp67 + tmp69
    tmp71 = libdevice.sqrt(tmp70)
    tmp72 = tmp59 * tmp71
    tmp73 = tmp56 / tmp72
    tmp74 = triton_helpers.maximum(tmp73, tmp23)
    tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
    tmp76 = tl.where(tmp55, tmp74, tmp75)
    tmp77 = tmp0 >= tmp53
    tmp78 = tl.full([1], 4, tl.int64)
    tmp79 = tmp0 < tmp78
    tmp80 = tl.load(in_ptr9 + (x0), tmp77 & xmask, eviction_policy='evict_last', other=0.0)
    tmp83 = libdevice.sqrt(tmp82)
    tmp84 = tl.load(in_ptr11 + (4*x0), tmp77 & xmask, eviction_policy='evict_last', other=0.0)
    tmp85 = tmp84 * tmp84
    tmp86 = tl.load(in_ptr11 + (1 + (4*x0)), tmp77 & xmask, eviction_policy='evict_last', other=0.0)
    tmp87 = tmp86 * tmp86
    tmp88 = tmp85 + tmp87
    tmp89 = tl.load(in_ptr11 + (2 + (4*x0)), tmp77 & xmask, eviction_policy='evict_last', other=0.0)
    tmp90 = tmp89 * tmp89
    tmp91 = tmp88 + tmp90
    tmp92 = tl.load(in_ptr11 + (3 + (4*x0)), tmp77 & xmask, eviction_policy='evict_last', other=0.0)
    tmp93 = tmp92 * tmp92
    tmp94 = tmp91 + tmp93
    tmp95 = libdevice.sqrt(tmp94)
    tmp96 = tmp83 * tmp95
    tmp97 = tmp80 / tmp96
    tmp98 = triton_helpers.maximum(tmp97, tmp23)
    tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
    tmp100 = tl.where(tmp77, tmp98, tmp99)
    tmp101 = tl.where(tmp55, tmp76, tmp100)
    tmp102 = tl.where(tmp30, tmp51, tmp101)
    tmp103 = tl.where(tmp4, tmp26, tmp102)
    tl.store(out_ptr0 + (x2), tmp103, xmask)
''', device_str='cuda')
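
# [editor's note, illustrative] The pointwise kernel above assembles one row of
# the cosine-similarity matrix: for each of four segments it divides a dot
# product by the product of two norms (a precomputed squared norm and the
# per-row norms of a centroid matrix), floored at 1e-06, and concatenates the
# four 4-element results. A hedged eager-mode sketch; argument names are
# assumptions about what the in_ptr triples hold:
def _sketch_cs_row(dots, sq_norms, centroid_mats):
    # dots: four (4,) dot-product vectors; sq_norms: four scalars (||a||^2);
    # centroid_mats: four (4, 4) centroid matrices
    rows = []
    for d, n, c in zip(dots, sq_norms, centroid_mats):
        denom = torch.sqrt(n) * c.norm(dim=1)        # ||a|| * ||centroid_j||
        rows.append(torch.clamp(d / denom, min=1e-06))
    return torch.cat(rows)                           # (16,) cs_row
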
# kernel path: runs/run_shard_8/inductor_cache/qw/cqwtxu5kn2bvn6whkodf5hex3x352f66d5yqfcryzy5xmydy2txx.py
# Topologically Sorted Source Nodes: [norm_10, excl_12, excl_13, norm_12, norm_14, new_centroids_9, new_centroids_10, new_centroids_13, new_centroids_14], Original ATen: [aten.linalg_vector_norm, aten.cat, aten.mean, aten.stack]
# Source node to ATen node mapping:
#   excl_12 => cat_10
#   excl_13 => mean_7
#   new_centroids_10 => cat_18
#   new_centroids_13 => cat_23
#   new_centroids_14 => cat_25
#   new_centroids_9 => cat_16
#   norm_10 => pow_21, sum_11
#   norm_12 => pow_25, sum_13
#   norm_14 => pow_29, sum_15
# Graph fragment:
#   %pow_21 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_29, 2), kwargs = {})
#   %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_21, None), kwargs = {})
#   %cat_10 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_13, %slice_14],), kwargs = {})
#   %mean_7 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_10, [0]), kwargs = {})
#   %pow_25 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_30, 2), kwargs = {})
#   %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_25, None), kwargs = {})
#   %pow_29 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_31, 2), kwargs = {})
#   %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_29, None), kwargs = {})
#   %cat_16 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_10, %select_12],), kwargs = {})
#   %cat_18 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %mean_11, %select_12],), kwargs = {})
#   %cat_23 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_14],), kwargs = {})
#   %cat_25 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_15],), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_stack_8 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_stack_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {9: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(9,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_stack_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_stack_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (20 + r0), None)
    tmp5 = tl.load(in_ptr0 + (24 + r0), None)
    tmp10 = tl.load(in_ptr0 + (28 + r0), None)
    tmp44 = tl.load(in_ptr0 + (16 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.sum(tmp12, 1)[:, None]
    tmp15 = tl.full([1, 1], 0, tl.int64)
    tmp16 = tmp15 >= tmp15
    tmp17 = tl.full([1, 1], 2, tl.int64)
    tmp18 = tmp15 < tmp17
    tmp19 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp18, other=0.0)
    tmp20 = tmp15 >= tmp17
    tmp21 = tl.full([1, 1], 3, tl.int64)
    tmp22 = tmp15 < tmp21
    tmp23 = tl.load(in_ptr0 + (tl.broadcast_to(28 + r0, [XBLOCK, RBLOCK])), tmp20, other=0.0)
    tmp24 = tl.where(tmp18, tmp19, tmp23)
    tmp25 = tl.full([1, 1], 1, tl.int64)
    tmp26 = tmp25 >= tmp15
    tmp27 = tmp25 < tmp17
    tmp28 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp27, other=0.0)
    tmp29 = tmp25 >= tmp17
    tmp30 = tmp25 < tmp21
    tmp31 = tl.load(in_ptr0 + (tl.broadcast_to(28 + r0, [XBLOCK, RBLOCK])), tmp29, other=0.0)
    tmp32 = tl.where(tmp27, tmp28, tmp31)
    tmp33 = tmp24 + tmp32
    tmp34 = tmp17 >= tmp15
    tmp35 = tmp17 < tmp17
    tmp36 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*2), [XBLOCK, RBLOCK])), tmp35, other=0.0)
    tmp37 = tmp17 >= tmp17
    tmp38 = tmp17 < tmp21
    tmp39 = tl.load(in_ptr0 + (tl.broadcast_to(28 + r0, [XBLOCK, RBLOCK])), tmp37, other=0.0)
    tmp40 = tl.where(tmp35, tmp36, tmp39)
    tmp41 = tmp33 + tmp40
    tmp42 = 3.0
    tmp43 = tmp41 / tmp42
    tmp45 = tmp44 + tmp0
    tmp46 = tmp45 + tmp5
    tmp47 = tmp46 + tmp10
    tmp48 = 4.0
    tmp49 = tmp47 / tmp48
    tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp43, None)
    tl.store(out_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp49, None)
    tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp49, None)
    tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp49, None)
    tl.store(out_ptr7 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp49, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
    tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
    tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/te/cteub7ll6kedydjue4iwrcci3eookcmcdgpnser3qmghytygolqj.py
# Topologically Sorted Source Nodes: [excl_20, excl_21, norm_20, norm_22, new_centroids_13, new_centroids_14], Original ATen: [aten.cat, aten.mean, aten.linalg_vector_norm, aten.stack]
# Source node to ATen node mapping:
#   excl_20 => cat_17
#   excl_21 => mean_11
#   new_centroids_13 => cat_23
#   new_centroids_14 => cat_25
#   norm_20 => pow_41, sum_21
#   norm_22 => pow_45, sum_23
# Graph fragment:
#   %cat_17 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_21, %slice_22],), kwargs = {})
#   %mean_11 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_17, [0]), kwargs = {})
#   %pow_41 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_54, 2), kwargs = {})
#   %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_41, None), kwargs = {})
#   %pow_45 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_55, 2), kwargs = {})
#   %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_45, None), kwargs = {})
#   %cat_23 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_14],), kwargs = {})
#   %cat_25 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_15],), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_stack_9 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_stack_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(6,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_stack_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_stack_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (40 + r0), None)
    tmp5 = tl.load(in_ptr0 + (44 + r0), None)
    tmp39 = tl.load(in_ptr0 + (32 + r0), None)
    tmp40 = tl.load(in_ptr0 + (36 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp10 = tl.full([1, 1], 0, tl.int64)
    tmp11 = tmp10 >= tmp10
    tmp12 = tl.full([1, 1], 2, tl.int64)
    tmp13 = tmp10 < tmp12
    tmp14 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp13, other=0.0)
    tmp15 = tmp10 >= tmp12
    tmp16 = tl.full([1, 1], 3, tl.int64)
    tmp17 = tmp10 < tmp16
    tmp18 = tl.load(in_ptr0 + (tl.broadcast_to(44 + r0, [XBLOCK, RBLOCK])), tmp15, other=0.0)
    tmp19 = tl.where(tmp13, tmp14, tmp18)
    tmp20 = tl.full([1, 1], 1, tl.int64)
    tmp21 = tmp20 >= tmp10
    tmp22 = tmp20 < tmp12
    tmp23 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp22, other=0.0)
    tmp24 = tmp20 >= tmp12
    tmp25 = tmp20 < tmp16
    tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(44 + r0, [XBLOCK, RBLOCK])), tmp24, other=0.0)
    tmp27 = tl.where(tmp22, tmp23, tmp26)
    tmp28 = tmp19 + tmp27
    tmp29 = tmp12 >= tmp10
    tmp30 = tmp12 < tmp12
    tmp31 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*2), [XBLOCK, RBLOCK])), tmp30, other=0.0)
    tmp32 = tmp12 >= tmp12
    tmp33 = tmp12 < tmp16
    tmp34 = tl.load(in_ptr0 + (tl.broadcast_to(44 + r0, [XBLOCK, RBLOCK])), tmp32, other=0.0)
    tmp35 = tl.where(tmp30, tmp31, tmp34)
    tmp36 = tmp28 + tmp35
    tmp37 = 3.0
    tmp38 = tmp36 / tmp37
    tmp41 = tmp39 + tmp40
    tmp42 = tmp41 + tmp0
    tmp43 = tmp42 + tmp5
    tmp44 = 4.0
    tmp45 = tmp43 / tmp44
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp38, None)
    tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp45, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
    tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/zz/czzgiodr25igbxfpbzkqi75p57p62kkx2z7qtcr7iljk7knobkfe.py
# Topologically Sorted Source Nodes: [norm_26], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
#   norm_26 => pow_53, sum_27
# Graph fragment:
#   %pow_53 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_77, 2), kwargs = {})
#   %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_53, None), kwargs = {})
triton_per_fused_linalg_vector_norm_10 = async_compile.triton('triton_per_fused_linalg_vector_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_10(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (52 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/36/c36assjyhb77brp4nnctqswixa5jroretdu3sslbnwzkvoxh4lw5.py
# Topologically Sorted Source Nodes: [excl_28, excl_29, norm_30], Original ATen: [aten.cat, aten.mean, aten.linalg_vector_norm]
# Source node to ATen node mapping:
#   excl_28 => cat_24
#   excl_29 => mean_15
#   norm_30 => pow_61, sum_31
# Graph fragment:
#   %cat_24 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_29, %slice_30],), kwargs = {})
#   %mean_15 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%cat_24, [0]), kwargs = {})
#   %pow_61 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_79, 2), kwargs = {})
#   %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_61, None), kwargs = {})
triton_per_fused_cat_linalg_vector_norm_mean_11 = async_compile.triton('triton_per_fused_cat_linalg_vector_norm_mean_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_linalg_vector_norm_mean_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_linalg_vector_norm_mean_11(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp29 = tl.load(in_ptr0 + (60 + r0), None)
    tmp0 = tl.full([1, 1], 0, tl.int64)
    tmp1 = tmp0 >= tmp0
    tmp2 = tl.full([1, 1], 2, tl.int64)
    tmp3 = tmp0 < tmp2
    tmp4 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*0), [XBLOCK, RBLOCK])), tmp3, other=0.0)
    tmp5 = tmp0 >= tmp2
    tmp6 = tl.full([1, 1], 3, tl.int64)
    tmp7 = tmp0 < tmp6
    tmp8 = tl.load(in_ptr0 + (tl.broadcast_to(60 + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
    tmp9 = tl.where(tmp3, tmp4, tmp8)
    tmp10 = tl.full([1, 1], 1, tl.int64)
    tmp11 = tmp10 >= tmp0
    tmp12 = tmp10 < tmp2
    tmp13 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*1), [XBLOCK, RBLOCK])), tmp12, other=0.0)
    tmp14 = tmp10 >= tmp2
    tmp15 = tmp10 < tmp6
    tmp16 = tl.load(in_ptr0 + (tl.broadcast_to(60 + r0, [XBLOCK, RBLOCK])), tmp14, other=0.0)
    tmp17 = tl.where(tmp12, tmp13, tmp16)
    tmp18 = tmp9 + tmp17
    tmp19 = tmp2 >= tmp0
    tmp20 = tmp2 < tmp2
    tmp21 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*2), [XBLOCK, RBLOCK])), tmp20, other=0.0)
    tmp22 = tmp2 >= tmp2
    tmp23 = tmp2 < tmp6
    tmp24 = tl.load(in_ptr0 + (tl.broadcast_to(60 + r0, [XBLOCK, RBLOCK])), tmp22, other=0.0)
    tmp25 = tl.where(tmp20, tmp21, tmp24)
    tmp26 = tmp18 + tmp25
    tmp27 = 3.0
    tmp28 = tmp26 / tmp27
    tmp30 = tmp29 * tmp29
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp33 = tl.sum(tmp31, 1)[:, None]
    tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp28, None)
    tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp33, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/rb/crbhwfxwz4knodchqf4oud2ojiulhvbli6snxsgr56f42mcvtvqr.py
# Topologically Sorted Source Nodes: [norm_28], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
#   norm_28 => pow_57, sum_29
# Graph fragment:
#   %pow_57 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_78, 2), kwargs = {})
#   %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_57, None), kwargs = {})
triton_per_fused_linalg_vector_norm_12 = async_compile.triton('triton_per_fused_linalg_vector_norm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_12(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (56 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/cs/ccsc3gkgu5deyhztcxdjwui2gavgf62f6nhcxahznhzc4rpkrqgu.py
# Topologically Sorted Source Nodes: [new_centroids_15], Original ATen: [aten.stack]
# Source node to ATen node mapping:
#   new_centroids_15 => cat_26
# Graph fragment:
#   %cat_26 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_34, %select_10, %select_11, %mean_16],), kwargs = {})
triton_poi_fused_stack_13 = async_compile.triton('triton_poi_fused_stack_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 15, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (4 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.load(in_ptr0 + (8 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp9 = tmp7 + tmp8
    tmp10 = tl.load(in_ptr0 + (12 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp4, tmp13, tmp14)
    tmp16 = tmp0 >= tmp3
    tmp17 = tl.full([1], 8, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tmp16 & tmp18
    tmp20 = tl.load(in_ptr0 + (16 + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tl.load(in_ptr0 + (20 + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp22 = tmp20 + tmp21
    tmp23 = tl.load(in_ptr0 + (24 + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp24 = tmp22 + tmp23
    tmp25 = tl.load(in_ptr0 + (28 + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tmp24 + tmp25
    tmp27 = tmp26 / tmp12
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp19, tmp27, tmp28)
    tmp30 = tmp0 >= tmp17
    tmp31 = tl.full([1], 12, tl.int64)
    tmp32 = tmp0 < tmp31
    tmp33 = tmp30 & tmp32
    tmp34 = tl.load(in_ptr0 + (32 + ((-8) + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
    tmp35 = tl.load(in_ptr0 + (36 + ((-8) + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
    tmp36 = tmp34 + tmp35
    tmp37 = tl.load(in_ptr0 + (40 + ((-8) + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
    tmp38 = tmp36 + tmp37
    tmp39 = tl.load(in_ptr0 + (44 + ((-8) + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
    tmp40 = tmp38 + tmp39
    tmp41 = tmp40 / tmp12
    tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
    tmp43 = tl.where(tmp33, tmp41, tmp42)
    tmp44 = tmp0 >= tmp31
    tmp45 = tl.full([1], 16, tl.int64)
    tmp46 = tmp0 < tmp45
    tmp47 = tl.load(in_ptr0 + (48 + ((-12) + x0)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
    tmp48 = tl.load(in_ptr0 + (52 + ((-12) + x0)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
    tmp49 = tmp47 + tmp48
    tmp50 = tl.load(in_ptr0 + (56 + ((-12) + x0)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
    tmp51 = tmp49 + tmp50
    tmp52 = 3.0
    tmp53 = tmp51 / tmp52
    tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
    tmp55 = tl.where(tmp44, tmp53, tmp54)
    tmp56 = tl.where(tmp33, tmp43, tmp55)
    tmp57 = tl.where(tmp19, tmp29, tmp56)
    tmp58 = tl.where(tmp4, tmp15, tmp57)
    tl.store(out_ptr0 + (x0), tmp58, xmask)
''', device_str='cuda')
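
# [editor's note, illustrative] The stack kernel above materialises the final
# stacked centroid matrix: full 4-row means for the first three (4, 4) blocks
# and a 3-row (leave-one-out) mean for the last block. A hedged eager-mode
# sketch; helper name hypothetical:
def _sketch_new_centroids(x):
    # x: (4, 4, 4)
    rows = [x[b].mean(0) for b in range(3)]
    rows.append(x[3, 0:3].mean(0))  # last centroid excludes the held-out row
    return torch.stack(rows)        # (4, 4), matching out_ptr0
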
aten._log_softmax_backward_data] # Source node to ATen node mapping: # L_row => cat_29 # L_row_1 => cat_30 # L_row_2 => cat_31 # log_softmax => amax, exp, sub, sum_33 # log_softmax_1 => amax_1, exp_1, sub_2, sum_34 # log_softmax_10 => amax_10, exp_10, sub_20, sum_43 # log_softmax_11 => amax_11, exp_11, sub_22, sum_44 # log_softmax_12 => amax_12, exp_12, log_12, sub_24, sub_25, sum_45 # log_softmax_13 => amax_13, exp_13, log_13, sub_26, sub_27, sum_46 # log_softmax_14 => amax_14, exp_14, log_14, sub_28, sub_29, sum_47 # log_softmax_15 => amax_15, exp_15, log_15, sub_30, sub_31, sum_48 # log_softmax_2 => amax_2, exp_2, sub_4, sum_35 # log_softmax_3 => amax_3, exp_3, sub_6, sum_36 # log_softmax_4 => amax_4, exp_4, sub_8, sum_37 # log_softmax_5 => amax_5, exp_5, sub_10, sum_38 # log_softmax_6 => amax_6, exp_6, sub_12, sum_39 # log_softmax_7 => amax_7, exp_7, sub_14, sum_40 # log_softmax_8 => amax_8, exp_8, sub_16, sum_41 # log_softmax_9 => amax_9, exp_9, sub_18, sum_42 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_101, [0], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_101, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_104, [0], True), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_104, %amax_1), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [0], True), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_107, [0], True), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_107, %amax_2), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [0], True), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_110, [0], True), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_110, %amax_3), kwargs = {}) # %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {}) # %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [0], True), kwargs = {}) # %cat_29 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_16, %unsqueeze_17, %unsqueeze_18, %unsqueeze_19],), kwargs = {}) # %amax_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_113, [0], True), kwargs = {}) # %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_113, %amax_4), kwargs = {}) # %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_8,), kwargs = {}) # %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [0], True), kwargs = {}) # %amax_5 : [num_users=1] = 
call_function[target=torch.ops.aten.amax.default](args = (%select_116, [0], True), kwargs = {}) # %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_116, %amax_5), kwargs = {}) # %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {}) # %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [0], True), kwargs = {}) # %amax_6 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_119, [0], True), kwargs = {}) # %sub_12 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_119, %amax_6), kwargs = {}) # %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_12,), kwargs = {}) # %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [0], True), kwargs = {}) # %amax_7 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_122, [0], True), kwargs = {}) # %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_122, %amax_7), kwargs = {}) # %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_14,), kwargs = {}) # %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_7, [0], True), kwargs = {}) # %cat_30 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_20, %unsqueeze_21, %unsqueeze_22, %unsqueeze_23],), kwargs = {}) # %amax_8 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_125, [0], True), kwargs = {}) # %sub_16 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_125, %amax_8), kwargs = {}) # %exp_8 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_16,), kwargs = {}) # %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_8, [0], True), kwargs = {}) # %amax_9 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_128, [0], True), kwargs = {}) # %sub_18 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_128, %amax_9), kwargs = {}) # %exp_9 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_18,), kwargs = {}) # %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_9, [0], True), kwargs = {}) # %amax_10 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_131, [0], True), kwargs = {}) # %sub_20 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_131, %amax_10), kwargs = {}) # %exp_10 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_20,), kwargs = {}) # %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_10, [0], True), kwargs = {}) # %amax_11 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_134, [0], True), kwargs = {}) # %sub_22 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_134, %amax_11), kwargs = {}) # %exp_11 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_22,), kwargs = {}) # %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_11, [0], True), kwargs = {}) # %cat_31 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = 
([%unsqueeze_24, %unsqueeze_25, %unsqueeze_26, %unsqueeze_27],), kwargs = {}) # %amax_12 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_137, [0], True), kwargs = {}) # %sub_24 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_137, %amax_12), kwargs = {}) # %exp_12 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_24,), kwargs = {}) # %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_12, [0], True), kwargs = {}) # %log_12 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_45,), kwargs = {}) # %sub_25 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_24, %log_12), kwargs = {}) # %amax_13 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_140, [0], True), kwargs = {}) # %sub_26 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_140, %amax_13), kwargs = {}) # %exp_13 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_26,), kwargs = {}) # %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_13, [0], True), kwargs = {}) # %log_13 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_46,), kwargs = {}) # %sub_27 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_26, %log_13), kwargs = {}) # %amax_14 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_143, [0], True), kwargs = {}) # %sub_28 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_143, %amax_14), kwargs = {}) # %exp_14 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_28,), kwargs = {}) # %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_14, [0], True), kwargs = {}) # %log_14 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_47,), kwargs = {}) # %sub_29 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_28, %log_14), kwargs = {}) # %amax_15 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%select_146, [0], True), kwargs = {}) # %sub_30 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_146, %amax_15), kwargs = {}) # %exp_15 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_30,), kwargs = {}) # %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_15, [0], True), kwargs = {}) # %log_15 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_48,), kwargs = {}) # %sub_31 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_30, %log_15), kwargs = {}) # %exp_16 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_31,), kwargs = {}) # %exp_17 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_29,), kwargs = {}) # %exp_18 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_27,), kwargs = {}) # %exp_19 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_25,), kwargs = {}) triton_per_fused__log_softmax__log_softmax_backward_data_stack_14 = async_compile.triton('triton_per_fused__log_softmax__log_softmax_backward_data_stack_14', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: 'i32', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {14: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=(14,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__log_softmax_backward_data_stack_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 30, 'num_reduction': 32, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__log_softmax_backward_data_stack_14(in_ptr0, in_ptr1, in_ptr2, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2 = tl.load(in_ptr1 + (r0), None) tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp15 = tl.load(in_ptr1 + (4 + r0), None) tmp26 = tl.load(in_ptr1 + (8 + r0), None) tmp37 = tl.load(in_ptr1 + (12 + r0), None) tmp48 = tl.load(in_ptr1 + (16 + r0), None) tmp59 = tl.load(in_ptr1 + (20 + r0), None) tmp70 = tl.load(in_ptr1 + (24 + r0), None) tmp81 = tl.load(in_ptr1 + (28 + r0), None) tmp92 = tl.load(in_ptr1 + (32 + r0), None) tmp103 = tl.load(in_ptr1 + (36 + r0), None) tmp114 = tl.load(in_ptr1 + (40 + r0), None) tmp125 = tl.load(in_ptr1 + (44 + r0), None) tmp136 = tl.load(in_ptr1 + (48 + r0), None) tmp147 = tl.load(in_ptr1 + (52 + r0), None) tmp158 = tl.load(in_ptr1 + (56 + r0), None) tmp169 = tl.load(in_ptr1 + (60 + r0), None) tmp185 = tl.load(in_ptr1 + (0)) tmp186 = tl.broadcast_to(tmp185, [XBLOCK, RBLOCK]) tmp199 = tl.load(in_ptr1 + (4)) tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK]) tmp213 = tl.load(in_ptr1 + (8)) tmp214 = tl.broadcast_to(tmp213, [XBLOCK, RBLOCK]) tmp226 = tl.load(in_ptr1 + (12)) tmp227 = tl.broadcast_to(tmp226, [XBLOCK, RBLOCK]) tmp239 = tl.load(in_ptr1 + (17)) tmp240 = tl.broadcast_to(tmp239, [XBLOCK, RBLOCK]) tmp249 = tl.load(in_ptr1 + (21)) tmp250 = tl.broadcast_to(tmp249, [XBLOCK, RBLOCK]) tmp259 = tl.load(in_ptr1 + (25)) tmp260 = 
tl.broadcast_to(tmp259, [XBLOCK, RBLOCK]) tmp269 = tl.load(in_ptr1 + (29)) tmp270 = tl.broadcast_to(tmp269, [XBLOCK, RBLOCK]) tmp282 = tl.load(in_ptr1 + (34)) tmp283 = tl.broadcast_to(tmp282, [XBLOCK, RBLOCK]) tmp292 = tl.load(in_ptr1 + (38)) tmp293 = tl.broadcast_to(tmp292, [XBLOCK, RBLOCK]) tmp302 = tl.load(in_ptr1 + (42)) tmp303 = tl.broadcast_to(tmp302, [XBLOCK, RBLOCK]) tmp312 = tl.load(in_ptr1 + (46)) tmp313 = tl.broadcast_to(tmp312, [XBLOCK, RBLOCK]) tmp3 = tmp1 * tmp2 tmp6 = tmp3 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = triton_helpers.max2(tmp7, 1)[:, None] tmp10 = tmp6 - tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp1 * tmp15 tmp17 = tmp16 + tmp5 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = triton_helpers.max2(tmp18, 1)[:, None] tmp21 = tmp17 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp27 = tmp1 * tmp26 tmp28 = tmp27 + tmp5 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = triton_helpers.max2(tmp29, 1)[:, None] tmp32 = tmp28 - tmp31 tmp33 = tl_math.exp(tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp38 = tmp1 * tmp37 tmp39 = tmp38 + tmp5 tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK]) tmp42 = triton_helpers.max2(tmp40, 1)[:, None] tmp43 = tmp39 - tmp42 tmp44 = tl_math.exp(tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp49 = tmp1 * tmp48 tmp50 = tmp49 + tmp5 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = triton_helpers.max2(tmp51, 1)[:, None] tmp54 = tmp50 - tmp53 tmp55 = tl_math.exp(tmp54) tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.sum(tmp56, 1)[:, None] tmp60 = tmp1 * tmp59 tmp61 = tmp60 + tmp5 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = triton_helpers.max2(tmp62, 1)[:, None] tmp65 = tmp61 - tmp64 tmp66 = tl_math.exp(tmp65) tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = tl.sum(tmp67, 1)[:, None] tmp71 = tmp1 * tmp70 tmp72 = tmp71 + tmp5 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = triton_helpers.max2(tmp73, 1)[:, None] tmp76 = tmp72 - tmp75 tmp77 = tl_math.exp(tmp76) tmp78 = tl.broadcast_to(tmp77, [XBLOCK, RBLOCK]) tmp80 = tl.sum(tmp78, 1)[:, None] tmp82 = tmp1 * tmp81 tmp83 = tmp82 + tmp5 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = triton_helpers.max2(tmp84, 1)[:, None] tmp87 = tmp83 - tmp86 tmp88 = tl_math.exp(tmp87) tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = tl.sum(tmp89, 1)[:, None] tmp93 = tmp1 * tmp92 tmp94 = tmp93 + tmp5 tmp95 = tl.broadcast_to(tmp94, [XBLOCK, RBLOCK]) tmp97 = triton_helpers.max2(tmp95, 1)[:, None] tmp98 = tmp94 - tmp97 tmp99 = tl_math.exp(tmp98) tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = tl.sum(tmp100, 1)[:, None] tmp104 = tmp1 * tmp103 tmp105 = tmp104 + tmp5 tmp106 = tl.broadcast_to(tmp105, [XBLOCK, RBLOCK]) tmp108 = triton_helpers.max2(tmp106, 1)[:, None] tmp109 = tmp105 - tmp108 tmp110 = tl_math.exp(tmp109) tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK]) tmp113 = tl.sum(tmp111, 1)[:, None] tmp115 = tmp1 * tmp114 tmp116 = tmp115 + tmp5 tmp117 = tl.broadcast_to(tmp116, [XBLOCK, RBLOCK]) tmp119 = triton_helpers.max2(tmp117, 1)[:, None] tmp120 = tmp116 - tmp119 tmp121 = tl_math.exp(tmp120) tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK]) tmp124 = tl.sum(tmp122, 1)[:, None] tmp126 = tmp1 * tmp125 tmp127 = tmp126 + tmp5 tmp128 = tl.broadcast_to(tmp127, [XBLOCK, 
RBLOCK]) tmp130 = triton_helpers.max2(tmp128, 1)[:, None] tmp131 = tmp127 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tl.broadcast_to(tmp132, [XBLOCK, RBLOCK]) tmp135 = tl.sum(tmp133, 1)[:, None] tmp137 = tmp1 * tmp136 tmp138 = tmp137 + tmp5 tmp139 = tl.broadcast_to(tmp138, [XBLOCK, RBLOCK]) tmp141 = triton_helpers.max2(tmp139, 1)[:, None] tmp142 = tmp138 - tmp141 tmp143 = tl_math.exp(tmp142) tmp144 = tl.broadcast_to(tmp143, [XBLOCK, RBLOCK]) tmp146 = tl.sum(tmp144, 1)[:, None] tmp148 = tmp1 * tmp147 tmp149 = tmp148 + tmp5 tmp150 = tl.broadcast_to(tmp149, [XBLOCK, RBLOCK]) tmp152 = triton_helpers.max2(tmp150, 1)[:, None] tmp153 = tmp149 - tmp152 tmp154 = tl_math.exp(tmp153) tmp155 = tl.broadcast_to(tmp154, [XBLOCK, RBLOCK]) tmp157 = tl.sum(tmp155, 1)[:, None] tmp159 = tmp1 * tmp158 tmp160 = tmp159 + tmp5 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = triton_helpers.max2(tmp161, 1)[:, None] tmp164 = tmp160 - tmp163 tmp165 = tl_math.exp(tmp164) tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK]) tmp168 = tl.sum(tmp166, 1)[:, None] tmp170 = tmp1 * tmp169 tmp171 = tmp170 + tmp5 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = triton_helpers.max2(tmp172, 1)[:, None] tmp175 = tmp171 - tmp174 tmp176 = tl_math.exp(tmp175) tmp177 = tl.broadcast_to(tmp176, [XBLOCK, RBLOCK]) tmp179 = tl.sum(tmp177, 1)[:, None] tmp180 = r0 tmp181 = tl.full([1, 1], 0, tl.int64) tmp182 = tmp180 >= tmp181 tmp183 = tl.full([1, 1], 1, tl.int64) tmp184 = tmp180 < tmp183 tmp187 = tmp1 * tmp186 tmp188 = tmp187 + tmp5 tmp189 = tmp188 - tmp9 tmp190 = tl_math.log(tmp14) tmp191 = tmp189 - tmp190 tmp192 = -tmp191 tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype) tmp194 = tl.where(tmp184, tmp192, tmp193) tmp195 = tmp180 >= tmp183 tmp196 = tl.full([1, 1], 2, tl.int64) tmp197 = tmp180 < tmp196 tmp198 = tmp195 & tmp197 tmp201 = tmp1 * tmp200 tmp202 = tmp201 + tmp5 tmp203 = tmp202 - tmp20 tmp204 = tl_math.log(tmp25) tmp205 = tmp203 - tmp204 tmp206 = -tmp205 tmp207 = tl.full(tmp206.shape, 0.0, tmp206.dtype) tmp208 = tl.where(tmp198, tmp206, tmp207) tmp209 = tmp180 >= tmp196 tmp210 = tl.full([1, 1], 3, tl.int64) tmp211 = tmp180 < tmp210 tmp212 = tmp209 & tmp211 tmp215 = tmp1 * tmp214 tmp216 = tmp215 + tmp5 tmp217 = tmp216 - tmp31 tmp218 = tl_math.log(tmp36) tmp219 = tmp217 - tmp218 tmp220 = -tmp219 tmp221 = tl.full(tmp220.shape, 0.0, tmp220.dtype) tmp222 = tl.where(tmp212, tmp220, tmp221) tmp223 = tmp180 >= tmp210 tmp224 = tl.full([1, 1], 4, tl.int64) tmp225 = tmp180 < tmp224 tmp228 = tmp1 * tmp227 tmp229 = tmp228 + tmp5 tmp230 = tmp229 - tmp42 tmp231 = tl_math.log(tmp47) tmp232 = tmp230 - tmp231 tmp233 = -tmp232 tmp234 = tl.full(tmp233.shape, 0.0, tmp233.dtype) tmp235 = tl.where(tmp223, tmp233, tmp234) tmp236 = tl.where(tmp212, tmp222, tmp235) tmp237 = tl.where(tmp198, tmp208, tmp236) tmp238 = tl.where(tmp184, tmp194, tmp237) tmp241 = tmp1 * tmp240 tmp242 = tmp241 + tmp5 tmp243 = tmp242 - tmp53 tmp244 = tl_math.log(tmp58) tmp245 = tmp243 - tmp244 tmp246 = -tmp245 tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype) tmp248 = tl.where(tmp184, tmp246, tmp247) tmp251 = tmp1 * tmp250 tmp252 = tmp251 + tmp5 tmp253 = tmp252 - tmp64 tmp254 = tl_math.log(tmp69) tmp255 = tmp253 - tmp254 tmp256 = -tmp255 tmp257 = tl.full(tmp256.shape, 0.0, tmp256.dtype) tmp258 = tl.where(tmp198, tmp256, tmp257) tmp261 = tmp1 * tmp260 tmp262 = tmp261 + tmp5 tmp263 = tmp262 - tmp75 tmp264 = tl_math.log(tmp80) tmp265 = tmp263 - tmp264 tmp266 = -tmp265 tmp267 = tl.full(tmp266.shape, 0.0, tmp266.dtype) tmp268 = tl.where(tmp212, tmp266, tmp267) 
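    # Each repeated block in this kernel fuses one eager log-softmax step:
    # scale a cosine-similarity column by w (tmp1) and shift by b (tmp5),
    # subtract the column max (triton_helpers.max2) for numerical stability,
    # then exp and sum to form the softmax denominator; the tl.where chains
    # below gather -log_softmax[j] for speaker j into the stacked L_row
    # outputs.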
tmp271 = tmp1 * tmp270 tmp272 = tmp271 + tmp5 tmp273 = tmp272 - tmp86 tmp274 = tl_math.log(tmp91) tmp275 = tmp273 - tmp274 tmp276 = -tmp275 tmp277 = tl.full(tmp276.shape, 0.0, tmp276.dtype) tmp278 = tl.where(tmp223, tmp276, tmp277) tmp279 = tl.where(tmp212, tmp268, tmp278) tmp280 = tl.where(tmp198, tmp258, tmp279) tmp281 = tl.where(tmp184, tmp248, tmp280) tmp284 = tmp1 * tmp283 tmp285 = tmp284 + tmp5 tmp286 = tmp285 - tmp97 tmp287 = tl_math.log(tmp102) tmp288 = tmp286 - tmp287 tmp289 = -tmp288 tmp290 = tl.full(tmp289.shape, 0.0, tmp289.dtype) tmp291 = tl.where(tmp184, tmp289, tmp290) tmp294 = tmp1 * tmp293 tmp295 = tmp294 + tmp5 tmp296 = tmp295 - tmp108 tmp297 = tl_math.log(tmp113) tmp298 = tmp296 - tmp297 tmp299 = -tmp298 tmp300 = tl.full(tmp299.shape, 0.0, tmp299.dtype) tmp301 = tl.where(tmp198, tmp299, tmp300) tmp304 = tmp1 * tmp303 tmp305 = tmp304 + tmp5 tmp306 = tmp305 - tmp119 tmp307 = tl_math.log(tmp124) tmp308 = tmp306 - tmp307 tmp309 = -tmp308 tmp310 = tl.full(tmp309.shape, 0.0, tmp309.dtype) tmp311 = tl.where(tmp212, tmp309, tmp310) tmp314 = tmp1 * tmp313 tmp315 = tmp314 + tmp5 tmp316 = tmp315 - tmp130 tmp317 = tl_math.log(tmp135) tmp318 = tmp316 - tmp317 tmp319 = -tmp318 tmp320 = tl.full(tmp319.shape, 0.0, tmp319.dtype) tmp321 = tl.where(tmp223, tmp319, tmp320) tmp322 = tl.where(tmp212, tmp311, tmp321) tmp323 = tl.where(tmp198, tmp301, tmp322) tmp324 = tl.where(tmp184, tmp291, tmp323) tmp325 = tl_math.log(tmp146) tmp326 = tmp142 - tmp325 tmp327 = tl_math.exp(tmp326) tmp328 = tl_math.log(tmp157) tmp329 = tmp153 - tmp328 tmp330 = tl_math.exp(tmp329) tmp331 = tl_math.log(tmp168) tmp332 = tmp164 - tmp331 tmp333 = tl_math.exp(tmp332) tmp334 = tl_math.log(tmp179) tmp335 = tmp175 - tmp334 tmp336 = tl_math.exp(tmp335) tl.store(out_ptr32 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp238, None) tl.store(out_ptr33 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp281, None) tl.store(out_ptr34 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp324, None) tl.store(out_ptr35 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp326, None) tl.store(out_ptr36 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp327, None) tl.store(out_ptr37 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp329, None) tl.store(out_ptr38 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp330, None) tl.store(out_ptr39 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp332, None) tl.store(out_ptr40 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp333, None) tl.store(out_ptr41 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp335, None) tl.store(out_ptr42 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp336, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/p3/cp3hem6ijvssri53og7f2pda7nauqlpvhy5sv4mndarqqqeb6qmi.py # Topologically Sorted Source Nodes: [L_row_3], Original ATen: [aten.stack] # Source node to ATen node mapping: # L_row_3 => cat_32 # Graph fragment: # %cat_32 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_28, %unsqueeze_29, %unsqueeze_30, %unsqueeze_31],), kwargs = {}) triton_poi_fused_stack_15 = async_compile.triton('triton_poi_fused_stack_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + (3)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp14 = tl.load(in_ptr1 + (3)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp23 = tl.load(in_ptr2 + (3)) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp31 = tl.load(in_ptr3 + (3)) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = -tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 2, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp16 = -tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 3, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp25 = -tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 4, tl.int64) tmp30 = tmp0 < tmp29 tmp33 = -tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp28, tmp33, tmp34) tmp36 = tl.where(tmp22, tmp27, tmp35) tmp37 = tl.where(tmp13, tmp18, tmp36) tmp38 = tl.where(tmp4, tmp9, tmp37) tl.store(out_ptr0 + (x0), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/it/citbft6scgowtafcadbm67lvmjscghqookf2irszk6yfd3lrovgj.py # Topologically Sorted Source Nodes: [mean_17], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean_17 => mean_17 # Graph fragment: # %mean_17 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%view_17,), kwargs = {}) triton_per_fused_mean_16 = async_compile.triton('triton_per_fused_mean_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_16(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 16.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (), ()) assert_size_stride(primals_3, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, ), (1, ), torch.float32) buf17 = empty_strided_cuda((16, ), (1, ), torch.float32) buf21 = empty_strided_cuda((16, ), (1, ), torch.float32) buf38 = empty_strided_cuda((16, ), (1, ), torch.float32) buf42 = empty_strided_cuda((16, ), (1, ), torch.float32) buf59 = empty_strided_cuda((16, ), (1, ), torch.float32) buf63 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [new_centroids, new_centroids_3, new_centroids_4, new_centroids_7, new_centroids_8, new_centroids_11, new_centroids_12], Original ATen: [aten.stack] stream0 = get_raw_stream(0) triton_poi_fused_stack_0.run(primals_1, buf0, buf17, buf21, buf38, buf42, buf59, buf63, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((), (), torch.float32) buf7 = empty_strided_cuda((16, ), (1, ), torch.float32) buf3 = reinterpret_tensor(buf7, (4, ), (1, ), 0) # alias # Topologically Sorted Source Nodes: [norm, excl_2, excl_3], Original ATen: [aten.linalg_vector_norm, aten.cat, aten.mean] triton_per_fused_cat_linalg_vector_norm_mean_1.run(primals_1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0) buf23 = empty_strided_cuda((), (), torch.float32) buf4 = reinterpret_tensor(buf7, (4, ), (1, ), 4) # alias buf14 = empty_strided_cuda((16, ), (1, ), torch.float32) buf11 = reinterpret_tensor(buf14, (4, ), (1, ), 4) # alias buf28 = empty_strided_cuda((16, ), (1, ), torch.float32) buf24 = reinterpret_tensor(buf28, (4, ), (1, ), 4) # alias # Topologically Sorted Source Nodes: [new_centroids_1, 
new_centroids_2, norm_8, excl_10, excl_11], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean] triton_per_fused_cat_linalg_vector_norm_mean_stack_2.run(primals_1, buf23, buf4, buf11, buf24, 1, 4, grid=grid(1), stream=stream0) buf44 = empty_strided_cuda((), (), torch.float32) buf5 = reinterpret_tensor(buf7, (4, ), (1, ), 8) # alias buf12 = reinterpret_tensor(buf14, (4, ), (1, ), 8) # alias buf26 = reinterpret_tensor(buf28, (4, ), (1, ), 8) # alias buf35 = empty_strided_cuda((16, ), (1, ), torch.float32) buf33 = reinterpret_tensor(buf35, (4, ), (1, ), 8) # alias buf49 = empty_strided_cuda((16, ), (1, ), torch.float32) buf45 = reinterpret_tensor(buf49, (4, ), (1, ), 8) # alias buf51 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [new_centroids_1, new_centroids_2, new_centroids_5, new_centroids_6, norm_16, excl_18, excl_19, norm_18], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean] triton_per_fused_cat_linalg_vector_norm_mean_stack_3.run(primals_1, buf44, buf5, buf12, buf26, buf33, buf45, buf51, 1, 4, grid=grid(1), stream=stream0) buf65 = empty_strided_cuda((), (), torch.float32) buf6 = reinterpret_tensor(buf7, (4, ), (1, ), 12) # alias buf13 = reinterpret_tensor(buf14, (4, ), (1, ), 12) # alias buf27 = reinterpret_tensor(buf28, (4, ), (1, ), 12) # alias buf34 = reinterpret_tensor(buf35, (4, ), (1, ), 12) # alias buf48 = reinterpret_tensor(buf49, (4, ), (1, ), 12) # alias buf56 = empty_strided_cuda((16, ), (1, ), torch.float32) buf55 = reinterpret_tensor(buf56, (4, ), (1, ), 12) # alias buf70 = empty_strided_cuda((16, ), (1, ), torch.float32) buf66 = reinterpret_tensor(buf70, (4, ), (1, ), 12) # alias # Topologically Sorted Source Nodes: [new_centroids_1, new_centroids_2, new_centroids_5, new_centroids_6, new_centroids_9, new_centroids_10, norm_24, excl_26, excl_27], Original ATen: [aten.stack, aten.linalg_vector_norm, aten.cat, aten.mean] triton_per_fused_cat_linalg_vector_norm_mean_stack_4.run(primals_1, buf65, buf6, buf13, buf27, buf34, buf48, buf55, buf66, 1, 4, grid=grid(1), stream=stream0) del buf3 del buf4 del buf5 del buf6 buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 4), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [norm_2], Original ATen: [aten.linalg_vector_norm] triton_per_fused_linalg_vector_norm_5.run(primals_1, buf9, 1, 4, grid=grid(1), stream=stream0) buf16 = empty_strided_cuda((), (), torch.float32) buf19 = empty_strided_cuda((), (), torch.float32) buf10 = reinterpret_tensor(buf14, (4, ), (1, ), 0) # alias buf25 = reinterpret_tensor(buf28, (4, ), (1, ), 0) # alias buf32 = reinterpret_tensor(buf35, (4, ), (1, ), 0) # alias buf46 = reinterpret_tensor(buf49, (4, ), (1, ), 0) # alias buf53 = reinterpret_tensor(buf56, (4, ), (1, ), 0) # alias buf67 = reinterpret_tensor(buf70, (4, ), (1, ), 0) # alias buf77 = empty_strided_cuda((16, ), (1, ), torch.float32) buf74 = reinterpret_tensor(buf77, (4, ), (1, ), 0) # alias # Topologically Sorted Source Nodes: [excl_4, excl_5, norm_4, norm_6, new_centroids_5, new_centroids_6, new_centroids_9, new_centroids_10, new_centroids_13, new_centroids_14], Original ATen: [aten.cat, aten.mean, aten.linalg_vector_norm, aten.stack] triton_per_fused_cat_linalg_vector_norm_mean_stack_6.run(primals_1, buf16, buf19, buf10, buf25, buf32, 
buf46, buf53, buf67, buf74, 1, 4, grid=grid(1), stream=stream0) del buf10 del buf11 del buf12 del buf13 buf15 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 8), reinterpret_tensor(buf14, (4, 4), (1, 4), 0), out=buf15) buf18 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 12), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), out=buf18) buf84 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf20 = reinterpret_tensor(buf84, (4, 4), (4, 1), 0) # alias # Topologically Sorted Source Nodes: [cs_row], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf1, buf2, buf0, buf8, buf9, buf7, buf15, buf16, buf14, buf18, buf19, buf17, buf20, 16, grid=grid(16), stream=stream0) del buf0 del buf14 del buf16 del buf17 del buf7 buf22 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [mm_4], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 16), reinterpret_tensor(buf21, (4, 4), (1, 4), 0), out=buf22) del buf24 del buf25 del buf26 del buf27 buf29 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [mm_5], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 20), reinterpret_tensor(buf28, (4, 4), (1, 4), 0), out=buf29) buf30 = buf9; del buf9 # reuse buf37 = buf2; del buf2 # reuse buf40 = buf19; del buf19 # reuse buf31 = reinterpret_tensor(buf35, (4, ), (1, ), 4) # alias buf47 = reinterpret_tensor(buf49, (4, ), (1, ), 4) # alias buf54 = reinterpret_tensor(buf56, (4, ), (1, ), 4) # alias buf68 = reinterpret_tensor(buf70, (4, ), (1, ), 4) # alias buf75 = reinterpret_tensor(buf77, (4, ), (1, ), 4) # alias # Topologically Sorted Source Nodes: [norm_10, excl_12, excl_13, norm_12, norm_14, new_centroids_9, new_centroids_10, new_centroids_13, new_centroids_14], Original ATen: [aten.linalg_vector_norm, aten.cat, aten.mean, aten.stack] triton_per_fused_cat_linalg_vector_norm_mean_stack_8.run(primals_1, buf30, buf37, buf40, buf31, buf47, buf54, buf68, buf75, 1, 4, grid=grid(1), stream=stream0) del buf31 del buf32 del buf33 del buf34 buf36 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [mm_6], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 24), reinterpret_tensor(buf35, (4, 4), (1, 4), 0), out=buf36) buf39 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mm_7], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 28), reinterpret_tensor(buf38, (4, 4), (1, 4), 0), out=buf39) buf41 = reinterpret_tensor(buf84, (4, 4), (4, 1), 16) # alias # Topologically Sorted Source Nodes: [cs_row_1], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf22, buf23, buf21, buf29, buf30, buf28, buf36, buf37, buf35, buf39, buf40, buf38, buf41, 16, grid=grid(16), stream=stream0) del buf21 del buf23 del buf28 del buf30 del buf35 del buf38 buf43 = buf39; del buf39 # reuse # Topologically Sorted Source Nodes: [mm_8], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 32), reinterpret_tensor(buf42, (4, 4), (1, 4), 0), out=buf43) del buf45 del buf46 del buf47 del buf48 buf50 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [mm_9], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 
1), 36), reinterpret_tensor(buf49, (4, 4), (1, 4), 0), out=buf50) buf58 = buf40; del buf40 # reuse buf61 = buf37; del buf37 # reuse buf52 = reinterpret_tensor(buf56, (4, ), (1, ), 8) # alias buf69 = reinterpret_tensor(buf70, (4, ), (1, ), 8) # alias buf76 = reinterpret_tensor(buf77, (4, ), (1, ), 8) # alias # Topologically Sorted Source Nodes: [excl_20, excl_21, norm_20, norm_22, new_centroids_13, new_centroids_14], Original ATen: [aten.cat, aten.mean, aten.linalg_vector_norm, aten.stack] triton_per_fused_cat_linalg_vector_norm_mean_stack_9.run(primals_1, buf58, buf61, buf52, buf69, buf76, 1, 4, grid=grid(1), stream=stream0) del buf52 del buf53 del buf54 del buf55 buf57 = buf29; del buf29 # reuse # Topologically Sorted Source Nodes: [mm_10], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 40), reinterpret_tensor(buf56, (4, 4), (1, 4), 0), out=buf57) buf60 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [mm_11], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 44), reinterpret_tensor(buf59, (4, 4), (1, 4), 0), out=buf60) buf62 = reinterpret_tensor(buf84, (4, 4), (4, 1), 32) # alias # Topologically Sorted Source Nodes: [cs_row_2], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf43, buf44, buf42, buf50, buf51, buf49, buf57, buf58, buf56, buf60, buf61, buf59, buf62, 16, grid=grid(16), stream=stream0) del buf42 del buf44 del buf49 del buf56 buf64 = buf60; del buf60 # reuse # Topologically Sorted Source Nodes: [mm_12], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 48), reinterpret_tensor(buf63, (4, 4), (1, 4), 0), out=buf64) del buf66 del buf67 del buf68 del buf69 buf71 = buf57; del buf57 # reuse # Topologically Sorted Source Nodes: [mm_13], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 52), reinterpret_tensor(buf70, (4, 4), (1, 4), 0), out=buf71) buf72 = buf61; del buf61 # reuse # Topologically Sorted Source Nodes: [norm_26], Original ATen: [aten.linalg_vector_norm] triton_per_fused_linalg_vector_norm_10.run(primals_1, buf72, 1, 4, grid=grid(1), stream=stream0) buf73 = reinterpret_tensor(buf77, (4, ), (1, ), 12) # alias buf82 = buf58; del buf58 # reuse # Topologically Sorted Source Nodes: [excl_28, excl_29, norm_30], Original ATen: [aten.cat, aten.mean, aten.linalg_vector_norm] triton_per_fused_cat_linalg_vector_norm_mean_11.run(primals_1, buf73, buf82, 1, 4, grid=grid(1), stream=stream0) del buf73 del buf74 del buf75 del buf76 buf78 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [mm_14], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 56), reinterpret_tensor(buf77, (4, 4), (1, 4), 0), out=buf78) buf79 = buf51; del buf51 # reuse # Topologically Sorted Source Nodes: [norm_28], Original ATen: [aten.linalg_vector_norm] triton_per_fused_linalg_vector_norm_12.run(primals_1, buf79, 1, 4, grid=grid(1), stream=stream0) buf80 = buf59; del buf59 # reuse # Topologically Sorted Source Nodes: [new_centroids_15], Original ATen: [aten.stack] triton_poi_fused_stack_13.run(primals_1, buf80, 16, grid=grid(16), stream=stream0) buf81 = buf43; del buf43 # reuse # Topologically Sorted Source Nodes: [mm_15], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 60), reinterpret_tensor(buf80, (4, 4), (1, 4), 0), out=buf81) del primals_1 buf83 = reinterpret_tensor(buf84, (4, 4), (4, 1), 48) # alias # Topologically Sorted Source Nodes: 
[cs_row_3], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf64, buf65, buf63, buf71, buf72, buf70, buf78, buf79, buf77, buf81, buf82, buf80, buf83, 16, grid=grid(16), stream=stream0) del buf63 del buf65 del buf70 del buf72 del buf77 del buf79 buf125 = buf80; del buf80 # reuse buf93 = reinterpret_tensor(buf125, (4, ), (1, ), 0) # alias buf102 = reinterpret_tensor(buf125, (4, ), (1, ), 4) # alias buf111 = reinterpret_tensor(buf125, (4, ), (1, ), 8) # alias buf114 = reinterpret_tensor(buf81, (4, ), (1, ), 0); del buf81 # reuse buf130 = reinterpret_tensor(buf78, (4, ), (1, ), 0); del buf78 # reuse buf117 = reinterpret_tensor(buf71, (4, ), (1, ), 0); del buf71 # reuse buf129 = reinterpret_tensor(buf64, (4, ), (1, ), 0); del buf64 # reuse buf120 = empty_strided_cuda((4, ), (1, ), torch.float32) buf128 = empty_strided_cuda((4, ), (1, ), torch.float32) buf123 = empty_strided_cuda((4, ), (1, ), torch.float32) buf127 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [log_softmax, log_softmax_1, log_softmax_2, log_softmax_3, L_row, log_softmax_4, log_softmax_5, log_softmax_6, log_softmax_7, L_row_1, log_softmax_8, log_softmax_9, log_softmax_10, log_softmax_11, L_row_2, log_softmax_12, log_softmax_13, log_softmax_14, log_softmax_15], Original ATen: [aten._log_softmax, aten.stack, aten._log_softmax_backward_data] triton_per_fused__log_softmax__log_softmax_backward_data_stack_14.run(primals_2, buf84, primals_3, buf93, buf102, buf111, buf114, buf130, buf117, buf129, buf120, buf128, buf123, buf127, 1, 4, grid=grid(1), stream=stream0) buf124 = reinterpret_tensor(buf125, (4, ), (1, ), 12) # alias # Topologically Sorted Source Nodes: [L_row_3], Original ATen: [aten.stack] triton_poi_fused_stack_15.run(buf114, buf117, buf120, buf123, buf124, 4, grid=grid(4), stream=stream0) del buf114 del buf117 del buf120 del buf123 buf126 = buf82; del buf82 # reuse buf131 = buf126; del buf126 # reuse # Topologically Sorted Source Nodes: [mean_17], Original ATen: [aten.mean] triton_per_fused_mean_16.run(buf131, buf125, 1, 16, grid=grid(1), stream=stream0) del buf102 del buf111 del buf124 del buf125 del buf93 return (buf131, primals_2, primals_3, buf84, buf127, buf128, buf129, buf130, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
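# Usage sketch for the compiled entry point above. The mapping onto the eager
# GE2ELoss module that follows is: primals_1 is the (4, 4, 4) d-vector batch,
# primals_2 and primals_3 are the scalar parameters w and b, and the first
# element of the tuple returned by call() (buf131) is the scalar mean loss.
# example_usage is an illustrative helper, not emitted by Inductor; it assumes
# a CUDA device and mirrors what benchmark_compiled_module() builds with
# rand_strided.
def example_usage():
    dvecs = torch.rand((4, 4, 4), device='cuda:0')  # primals_1: (N, M, D) d-vectors
    w = torch.tensor(10.0, device='cuda:0')         # primals_2: scale, init_w
    b = torch.tensor(-5.0, device='cuda:0')         # primals_3: offset, init_b
    out = call([dvecs, w, b])                       # call() consumes (clears) the arg list
    return out[0]                                   # buf131: scalar mean GE2E loss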
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.distributions


class GE2ELoss(nn.Module):

    def __init__(self, init_w=10.0, init_b=-5.0, loss_method='softmax'):
        """
        Implementation of the Generalized End-to-End loss defined in
        https://arxiv.org/abs/1710.10467 [1]

        Accepts an input of size (N, M, D) where N is the number of speakers
        in the batch, M is the number of utterances per speaker, and D is the
        dimensionality of the embedding vector (e.g. d-vector)

        Args:
            - init_w (float): defines the initial value of w in Equation (5) of [1]
            - init_b (float): defines the initial value of b in Equation (5) of [1]
            - loss_method (str): 'softmax' or 'contrast', selecting the
              per-embedding loss variant
        """
        super(GE2ELoss, self).__init__()
        self.w = nn.Parameter(torch.tensor(init_w))
        self.b = nn.Parameter(torch.tensor(init_b))
        self.loss_method = loss_method
        assert self.loss_method in ['softmax', 'contrast']
        if self.loss_method == 'softmax':
            self.embed_loss = self.embed_loss_softmax
        if self.loss_method == 'contrast':
            self.embed_loss = self.embed_loss_contrast

    def calc_new_centroids(self, dvecs, centroids, spkr, utt):
        """
        Calculates the new centroids excluding the reference utterance
        """
        excl = torch.cat((dvecs[spkr, :utt], dvecs[spkr, utt + 1:]))
        excl = torch.mean(excl, 0)
        new_centroids = []
        for i, centroid in enumerate(centroids):
            if i == spkr:
                new_centroids.append(excl)
            else:
                new_centroids.append(centroid)
        return torch.stack(new_centroids)

    def calc_cosine_sim(self, dvecs, centroids):
        """
        Make the cosine similarity matrix with dims (N, M, N)
        """
        cos_sim_matrix = []
        for spkr_idx, speaker in enumerate(dvecs):
            cs_row = []
            for utt_idx, utterance in enumerate(speaker):
                new_centroids = self.calc_new_centroids(dvecs, centroids,
                    spkr_idx, utt_idx)
                cs_row.append(torch.clamp(torch.mm(utterance.unsqueeze(1).
                    transpose(0, 1), new_centroids.transpose(0, 1)) / (
                    torch.norm(utterance) * torch.norm(new_centroids, dim=1
                    )), 1e-06))
            cs_row = torch.cat(cs_row, dim=0)
            cos_sim_matrix.append(cs_row)
        return torch.stack(cos_sim_matrix)

    def embed_loss_softmax(self, dvecs, cos_sim_matrix):
        """
        Calculates the loss on each embedding $L(e_{ji})$ by taking softmax
        """
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                L_row.append(-F.log_softmax(cos_sim_matrix[j, i], 0)[j])
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)

    def embed_loss_contrast(self, dvecs, cos_sim_matrix):
        """
        Calculates the loss on each embedding $L(e_{ji})$ by contrast loss
        with closest centroid
        """
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i])
                excl_centroids_sigmoids = torch.cat((centroids_sigmoids[:j],
                    centroids_sigmoids[j + 1:]))
                L_row.append(1.0 - torch.sigmoid(cos_sim_matrix[j, i, j]) +
                    torch.max(excl_centroids_sigmoids))
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)

    def forward(self, dvecs):
        """
        Calculates the GE2E loss for an input of dimensions
        (num_speakers, num_utts_per_speaker, dvec_feats)
        """
        centroids = torch.mean(dvecs, 1)
        cos_sim_matrix = self.calc_cosine_sim(dvecs, centroids)
        # torch.clamp is out-of-place, so the result must be reassigned for
        # the clamp to keep w >= 1e-6 (as bare `torch.clamp(self.w, 1e-06)`
        # it is a no-op).
        self.w.data = torch.clamp(self.w.data, 1e-06)
        cos_sim_matrix = self.w * cos_sim_matrix + self.b
        L = self.embed_loss(dvecs, cos_sim_matrix)
        return L.mean()


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
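# Minimal eager-mode sketch of the GE2ELoss module above; demo_ge2e is an
# illustrative helper, not part of the original file. The (4, 4, 4) input
# shape matches get_inputs(); loss_method='contrast' would select the
# contrast variant instead.
def demo_ge2e():
    loss_fn = GE2ELoss(init_w=10.0, init_b=-5.0, loss_method='softmax')
    dvecs = torch.randn(4, 4, 4, requires_grad=True)  # (N, M, D)
    loss = loss_fn(dvecs)  # scalar: mean of L(e_ji) over all N * M embeddings
    loss.backward()        # gradients reach dvecs and the learnable w, b
    return loss.item()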
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F from torch.functional import F from torch.nn import functional as F import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (8 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (12 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = 3.0 tmp11 = tmp9 / tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tmp15 = tl.full([1], 8, tl.int64) tmp16 = tmp0 < tmp15 tmp17 = tmp14 & tmp16 tmp18 = tl.load(in_ptr0 + (16 + (-4 + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.load(in_ptr0 + (20 + (-4 + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp18 + tmp19 tmp21 = tl.load(in_ptr0 + (24 + (-4 + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr0 + (28 + (-4 + x0)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 + tmp23 tmp25 = 4.0 tmp26 = tmp24 / tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp17, tmp26, tmp27) tmp29 = tmp0 >= tmp15 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr0 + (32 + (-8 + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tl.load(in_ptr0 + (36 + (-8 + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tmp33 + tmp34 tmp36 = tl.load(in_ptr0 + (40 + (-8 + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tl.load(in_ptr0 + (44 + (-8 + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tmp37 + tmp38 tmp40 = tmp39 / tmp25 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp32, tmp40, tmp41) tmp43 = tmp0 >= tmp30 tl.full([1], 16, tl.int64) tmp46 = tl.load(in_ptr0 + (48 + (-12 + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr0 + (52 + (-12 + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp46 + tmp47 tmp49 = tl.load(in_ptr0 + (56 + (-12 + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tmp48 + tmp49 tmp51 = tl.load(in_ptr0 + (60 + (-12 + x0)), tmp43 & xmask, eviction_policy='evict_last', other=0.0) tmp52 = tmp50 + tmp51 tmp53 = tmp52 / tmp25 tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype) 
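    # The guarded branches in this kernel build the stacked centroids from
    # calc_new_centroids: 3-term sums divided by 3.0 are leave-one-out means
    # (the reference utterance excluded), while the 4-term sums divided by
    # 4.0 are the full per-speaker centroids from torch.mean(dvecs, 1).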
tmp55 = tl.where(tmp43, tmp53, tmp54) tmp56 = tl.where(tmp32, tmp42, tmp55) tmp57 = tl.where(tmp17, tmp28, tmp56) tmp58 = tl.where(tmp4, tmp13, tmp57) tmp59 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp60 = tmp59 + tmp5 tmp61 = tmp60 + tmp6 tmp62 = tmp61 / tmp10 tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype) tmp64 = tl.where(tmp4, tmp62, tmp63) tmp65 = tl.where(tmp4, tmp64, tmp57) tmp66 = tmp61 + tmp8 tmp67 = tmp66 / tmp25 tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype) tmp69 = tl.where(tmp4, tmp67, tmp68) tmp70 = tmp19 + tmp21 tmp71 = tmp70 + tmp23 tmp72 = tmp71 / tmp10 tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype) tmp74 = tl.where(tmp17, tmp72, tmp73) tmp75 = tl.where(tmp17, tmp74, tmp56) tmp76 = tl.where(tmp4, tmp69, tmp75) tmp77 = tmp22 / tmp10 tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype) tmp79 = tl.where(tmp17, tmp77, tmp78) tmp80 = tl.where(tmp17, tmp79, tmp56) tmp81 = tl.where(tmp4, tmp69, tmp80) tmp82 = tmp34 + tmp36 tmp83 = tmp82 + tmp38 tmp84 = tmp83 / tmp10 tmp85 = tl.full(tmp84.shape, 0.0, tmp84.dtype) tmp86 = tl.where(tmp32, tmp84, tmp85) tmp87 = tl.where(tmp32, tmp86, tmp55) tmp88 = tl.where(tmp17, tmp28, tmp87) tmp89 = tl.where(tmp4, tmp69, tmp88) tmp90 = tmp37 / tmp10 tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype) tmp92 = tl.where(tmp32, tmp90, tmp91) tmp93 = tl.where(tmp32, tmp92, tmp55) tmp94 = tl.where(tmp17, tmp28, tmp93) tmp95 = tl.where(tmp4, tmp69, tmp94) tmp96 = tmp47 + tmp49 tmp97 = tmp96 + tmp51 tmp98 = tmp97 / tmp10 tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype) tmp100 = tl.where(tmp43, tmp98, tmp99) tmp101 = tl.where(tmp32, tmp42, tmp100) tmp102 = tl.where(tmp17, tmp28, tmp101) tmp103 = tl.where(tmp4, tmp69, tmp102) tl.store(out_ptr0 + x0, tmp58, xmask) tl.store(out_ptr1 + x0, tmp65, xmask) tl.store(out_ptr2 + x0, tmp76, xmask) tl.store(out_ptr3 + x0, tmp81, xmask) tl.store(out_ptr4 + x0, tmp89, xmask) tl.store(out_ptr5 + x0, tmp95, xmask) tl.store(out_ptr6 + x0, tmp103, xmask) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = tl.full([1, 1], 0, tl.int64) tmp7 = tl.full([1, 1], 1, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tl.load(in_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, other=0.0) tmp10 = tmp5 >= tmp7 tl.full([1, 1], 3, tl.int64) tmp13 = tl.load(in_ptr0 + tl.broadcast_to(8 + r0 + 4 * -1, [XBLOCK, RBLOCK]), tmp10, other=0.0) tmp14 = tl.where(tmp8, tmp9, tmp13) tmp16 = tmp7 < tmp7 tmp17 = tl.load(in_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp16, other=0.0) tmp18 = tmp7 >= tmp7 tmp20 = tl.load(in_ptr0 + tl.broadcast_to(8 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp18, other=0.0) tmp21 = tl.where(tmp16, tmp17, tmp20) tmp22 = tmp14 + tmp21 tmp23 = tl.full([1, 1], 2, tl.int64) tmp25 = tmp23 < tmp7 tmp26 = tl.load(in_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp25, other=0.0) tmp27 = tmp23 >= tmp7 tmp29 = tl.load(in_ptr0 + tl.broadcast_to(8 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp27, other=0.0) tmp30 = tl.where(tmp25, tmp26, tmp29) tmp31 = tmp22 + tmp30 tmp32 = 3.0 tmp33 = tmp31 / tmp32 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp33, None) tl.store(out_ptr0 + 
tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_stack_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (16 + r0), None) tmp5 = tl.load(in_ptr0 + (20 + r0), None) tmp7 = tl.load(in_ptr0 + (24 + r0), None) tmp9 = tl.load(in_ptr0 + (28 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp0 + tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tl.full([1, 1], 0, tl.int64) tmp15 = tl.full([1, 1], 1, tl.int64) tmp16 = tmp13 < tmp15 tmp17 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0, [XBLOCK, RBLOCK]), tmp16, other=0.0) tmp18 = tmp13 >= tmp15 tl.full([1, 1], 3, tl.int64) tmp21 = tl.load(in_ptr0 + tl.broadcast_to(24 + r0 + 4 * -1, [XBLOCK, RBLOCK]), tmp18, other=0.0) tmp22 = tl.where(tmp16, tmp17, tmp21) tmp24 = tmp15 < tmp15 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tmp15 >= tmp15 tmp28 = tl.load(in_ptr0 + tl.broadcast_to(24 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp26, other=0.0) tmp29 = tl.where(tmp24, tmp25, tmp28) tmp30 = tmp22 + tmp29 tmp31 = tl.full([1, 1], 2, tl.int64) tmp33 = tmp31 < tmp15 tmp34 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0, [XBLOCK, RBLOCK]), tmp33, other=0.0) tmp35 = tmp31 >= tmp15 tmp37 = tl.load(in_ptr0 + tl.broadcast_to(24 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp35, other=0.0) tmp38 = tl.where(tmp33, tmp34, tmp37) tmp39 = tmp30 + tmp38 tmp40 = 3.0 tmp41 = tmp39 / tmp40 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp41, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_stack_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (32 + r0), None) tmp5 = tl.load(in_ptr0 + (36 + r0), None) tmp7 = tl.load(in_ptr0 + (40 + r0), None) tmp9 = tl.load(in_ptr0 + (44 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp0 + tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tl.full([1, 1], 0, tl.int64) tmp15 = tl.full([1, 1], 1, tl.int64) tmp16 = tmp13 < tmp15 tmp17 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0, [XBLOCK, RBLOCK]), tmp16, other=0.0) tmp18 = tmp13 >= tmp15 tl.full([1, 1], 3, tl.int64) tmp21 = tl.load(in_ptr0 + tl.broadcast_to(40 + r0 + 4 * -1, [XBLOCK, RBLOCK]), tmp18, other=0.0) tmp22 = tl.where(tmp16, tmp17, tmp21) tmp24 = tmp15 < tmp15 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tmp15 >= tmp15 tmp28 = tl.load(in_ptr0 + tl.broadcast_to(40 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp26, other=0.0) tmp29 = tl.where(tmp24, tmp25, tmp28) tmp30 = tmp22 + tmp29 tmp31 = 
tl.full([1, 1], 2, tl.int64) tmp33 = tmp31 < tmp15 tmp34 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0, [XBLOCK, RBLOCK]), tmp33, other=0.0) tmp35 = tmp31 >= tmp15 tmp37 = tl.load(in_ptr0 + tl.broadcast_to(40 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp35, other=0.0) tmp38 = tl.where(tmp33, tmp34, tmp37) tmp39 = tmp30 + tmp38 tmp40 = 3.0 tmp41 = tmp39 / tmp40 tmp42 = tmp5 * tmp5 tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK]) tmp45 = tl.sum(tmp43, 1)[:, None] tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp41, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) tl.store(out_ptr6 + tl.full([XBLOCK, 1], 0, tl.int32), tmp45, None) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_stack_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (48 + r0), None) tmp5 = tl.load(in_ptr0 + (52 + r0), None) tmp7 = tl.load(in_ptr0 + (56 + r0), None) tmp9 = tl.load(in_ptr0 + (60 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp0 + tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tl.full([1, 1], 0, tl.int64) tmp15 = tl.full([1, 1], 1, tl.int64) tmp16 = tmp13 < tmp15 tmp17 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0, [XBLOCK, RBLOCK]), tmp16, other=0.0) tmp18 = tmp13 >= tmp15 tl.full([1, 1], 3, tl.int64) tmp21 = tl.load(in_ptr0 + tl.broadcast_to(56 + r0 + 4 * -1, [XBLOCK, RBLOCK]), tmp18, other=0.0) tmp22 = tl.where(tmp16, tmp17, tmp21) tmp24 = tmp15 < tmp15 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tmp15 >= tmp15 tmp28 = tl.load(in_ptr0 + tl.broadcast_to(56 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp26, other=0.0) tmp29 = tl.where(tmp24, tmp25, tmp28) tmp30 = tmp22 + tmp29 tmp31 = tl.full([1, 1], 2, tl.int64) tmp33 = tmp31 < tmp15 tmp34 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0, [XBLOCK, RBLOCK]), tmp33, other=0.0) tmp35 = tmp31 >= tmp15 tmp37 = tl.load(in_ptr0 + tl.broadcast_to(56 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp35, other=0.0) tmp38 = tl.where(tmp33, tmp34, tmp37) tmp39 = tmp30 + tmp38 tmp40 = 3.0 tmp41 = tmp39 / tmp40 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr7 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp41, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_linalg_vector_norm_5(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] 
tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_stack_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp39 = tl.load(in_ptr0 + r0, None) tmp40 = tl.load(in_ptr0 + (4 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = tl.full([1, 1], 0, tl.int64) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp10 < tmp12 tmp14 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp13, other=0.0) tmp15 = tmp10 >= tmp12 tl.full([1, 1], 3, tl.int64) tmp18 = tl.load(in_ptr0 + tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK]), tmp15, other=0.0) tmp19 = tl.where(tmp13, tmp14, tmp18) tmp20 = tl.full([1, 1], 1, tl.int64) tmp22 = tmp20 < tmp12 tmp23 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp22, other=0.0) tmp24 = tmp20 >= tmp12 tmp26 = tl.load(in_ptr0 + tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp27 = tl.where(tmp22, tmp23, tmp26) tmp28 = tmp19 + tmp27 tmp30 = tmp12 < tmp12 tmp31 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * 2, [XBLOCK, RBLOCK]), tmp30, other=0.0) tmp32 = tmp12 >= tmp12 tmp34 = tl.load(in_ptr0 + tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK]), tmp32, other=0.0) tmp35 = tl.where(tmp30, tmp31, tmp34) tmp36 = tmp28 + tmp35 tmp37 = 3.0 tmp38 = tmp36 / tmp37 tmp41 = tmp39 + tmp40 tmp42 = tmp41 + tmp0 tmp43 = tmp42 + tmp5 tmp44 = 4.0 tmp45 = tmp43 / tmp44 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp38, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr7 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr8 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp32 = tl.load(in_ptr4 + 0) tmp33 = tl.broadcast_to(tmp32, [XBLOCK]) tmp57 = tl.load(in_ptr7 + 0) tmp58 = tl.broadcast_to(tmp57, [XBLOCK]) tmp81 = tl.load(in_ptr10 + 0) tmp82 = tl.broadcast_to(tmp81, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = 
tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = libdevice.sqrt(tmp7) tmp9 = tl.load(in_ptr2 + 4 * x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tmp9 * tmp9 tmp11 = tl.load(in_ptr2 + (1 + 4 * x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tl.load(in_ptr2 + (3 + 4 * x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = libdevice.sqrt(tmp19) tmp21 = tmp8 * tmp20 tmp22 = tmp5 / tmp21 tmp23 = 1e-06 tmp24 = triton_helpers.maximum(tmp22, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp4, tmp24, tmp25) tmp27 = tmp0 >= tmp3 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr3 + x0, tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp34 = libdevice.sqrt(tmp33) tmp35 = tl.load(in_ptr5 + 4 * x0, tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tmp35 * tmp35 tmp37 = tl.load(in_ptr5 + (1 + 4 * x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tl.load(in_ptr5 + (2 + 4 * x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tl.load(in_ptr5 + (3 + 4 * x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp44 = tmp43 * tmp43 tmp45 = tmp42 + tmp44 tmp46 = libdevice.sqrt(tmp45) tmp47 = tmp34 * tmp46 tmp48 = tmp31 / tmp47 tmp49 = triton_helpers.maximum(tmp48, tmp23) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp30, tmp49, tmp50) tmp52 = tmp0 >= tmp28 tmp53 = tl.full([1], 3, tl.int64) tmp54 = tmp0 < tmp53 tmp55 = tmp52 & tmp54 tmp56 = tl.load(in_ptr6 + x0, tmp55 & xmask, eviction_policy= 'evict_last', other=0.0) tmp59 = libdevice.sqrt(tmp58) tmp60 = tl.load(in_ptr8 + 4 * x0, tmp55 & xmask, eviction_policy= 'evict_last', other=0.0) tmp61 = tmp60 * tmp60 tmp62 = tl.load(in_ptr8 + (1 + 4 * x0), tmp55 & xmask, eviction_policy= 'evict_last', other=0.0) tmp63 = tmp62 * tmp62 tmp64 = tmp61 + tmp63 tmp65 = tl.load(in_ptr8 + (2 + 4 * x0), tmp55 & xmask, eviction_policy= 'evict_last', other=0.0) tmp66 = tmp65 * tmp65 tmp67 = tmp64 + tmp66 tmp68 = tl.load(in_ptr8 + (3 + 4 * x0), tmp55 & xmask, eviction_policy= 'evict_last', other=0.0) tmp69 = tmp68 * tmp68 tmp70 = tmp67 + tmp69 tmp71 = libdevice.sqrt(tmp70) tmp72 = tmp59 * tmp71 tmp73 = tmp56 / tmp72 tmp74 = triton_helpers.maximum(tmp73, tmp23) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp55, tmp74, tmp75) tmp77 = tmp0 >= tmp53 tl.full([1], 4, tl.int64) tmp80 = tl.load(in_ptr9 + x0, tmp77 & xmask, eviction_policy= 'evict_last', other=0.0) tmp83 = libdevice.sqrt(tmp82) tmp84 = tl.load(in_ptr11 + 4 * x0, tmp77 & xmask, eviction_policy= 'evict_last', other=0.0) tmp85 = tmp84 * tmp84 tmp86 = tl.load(in_ptr11 + (1 + 4 * x0), tmp77 & xmask, eviction_policy ='evict_last', other=0.0) tmp87 = tmp86 * tmp86 tmp88 = tmp85 + tmp87 tmp89 = tl.load(in_ptr11 + (2 + 4 * x0), tmp77 & xmask, eviction_policy ='evict_last', other=0.0) tmp90 = tmp89 * tmp89 tmp91 = tmp88 + tmp90 tmp92 = tl.load(in_ptr11 + (3 + 4 * x0), tmp77 & xmask, eviction_policy ='evict_last', other=0.0) tmp93 = tmp92 * tmp92 tmp94 = tmp91 + tmp93 tmp95 = libdevice.sqrt(tmp94) tmp96 = 
tmp83 * tmp95 tmp97 = tmp80 / tmp96 tmp98 = triton_helpers.maximum(tmp97, tmp23) tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype) tmp100 = tl.where(tmp77, tmp98, tmp99) tmp101 = tl.where(tmp55, tmp76, tmp100) tmp102 = tl.where(tmp30, tmp51, tmp101) tmp103 = tl.where(tmp4, tmp26, tmp102) tl.store(out_ptr0 + x2, tmp103, xmask) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_stack_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (20 + r0), None) tmp5 = tl.load(in_ptr0 + (24 + r0), None) tmp10 = tl.load(in_ptr0 + (28 + r0), None) tmp44 = tl.load(in_ptr0 + (16 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp15 = tl.full([1, 1], 0, tl.int64) tmp17 = tl.full([1, 1], 2, tl.int64) tmp18 = tmp15 < tmp17 tmp19 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp18, other=0.0) tmp20 = tmp15 >= tmp17 tl.full([1, 1], 3, tl.int64) tmp23 = tl.load(in_ptr0 + tl.broadcast_to(28 + r0, [XBLOCK, RBLOCK]), tmp20, other=0.0) tmp24 = tl.where(tmp18, tmp19, tmp23) tmp25 = tl.full([1, 1], 1, tl.int64) tmp27 = tmp25 < tmp17 tmp28 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp27, other=0.0) tmp29 = tmp25 >= tmp17 tmp31 = tl.load(in_ptr0 + tl.broadcast_to(28 + r0, [XBLOCK, RBLOCK]), tmp29, other=0.0) tmp32 = tl.where(tmp27, tmp28, tmp31) tmp33 = tmp24 + tmp32 tmp35 = tmp17 < tmp17 tmp36 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * 2, [XBLOCK, RBLOCK]), tmp35, other=0.0) tmp37 = tmp17 >= tmp17 tmp39 = tl.load(in_ptr0 + tl.broadcast_to(28 + r0, [XBLOCK, RBLOCK]), tmp37, other=0.0) tmp40 = tl.where(tmp35, tmp36, tmp39) tmp41 = tmp33 + tmp40 tmp42 = 3.0 tmp43 = tmp41 / tmp42 tmp45 = tmp44 + tmp0 tmp46 = tmp45 + tmp5 tmp47 = tmp46 + tmp10 tmp48 = 4.0 tmp49 = tmp47 / tmp48 tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp43, None) tl.store(out_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp49, None) tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp49, None) tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp49, None) tl.store(out_ptr7 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp49, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_stack_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (40 + r0), None) tmp5 = tl.load(in_ptr0 + (44 + r0), None) tmp39 = tl.load(in_ptr0 + (32 + r0), None) tmp40 = tl.load(in_ptr0 + (36 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = tl.full([1, 1], 0, tl.int64) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp10 < tmp12 tmp14 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp13, other=0.0) tmp15 = tmp10 >= tmp12 tl.full([1, 1], 3, tl.int64) tmp18 = tl.load(in_ptr0 + tl.broadcast_to(44 + r0, [XBLOCK, RBLOCK]), tmp15, other=0.0) tmp19 = tl.where(tmp13, tmp14, tmp18) tmp20 = tl.full([1, 1], 1, tl.int64) tmp22 = tmp20 < tmp12 tmp23 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp22, other=0.0) tmp24 = tmp20 >= tmp12 tmp26 = tl.load(in_ptr0 + tl.broadcast_to(44 + r0, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp27 = tl.where(tmp22, tmp23, tmp26) tmp28 = tmp19 + tmp27 tmp30 = tmp12 < tmp12 tmp31 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * 2, [XBLOCK, RBLOCK]), tmp30, other=0.0) tmp32 = tmp12 >= tmp12 tmp34 = tl.load(in_ptr0 + tl.broadcast_to(44 + r0, [XBLOCK, RBLOCK]), tmp32, other=0.0) tmp35 = tl.where(tmp30, tmp31, tmp34) tmp36 = tmp28 + tmp35 tmp37 = 3.0 tmp38 = tmp36 / tmp37 tmp41 = tmp39 + tmp40 tmp42 = tmp41 + tmp0 tmp43 = tmp42 + tmp5 tmp44 = 4.0 tmp45 = tmp43 / tmp44 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp38, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp45, None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) @triton.jit def triton_per_fused_linalg_vector_norm_10(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (52 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_per_fused_cat_linalg_vector_norm_mean_11(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp29 = tl.load(in_ptr0 + (60 + r0), None) tmp0 = tl.full([1, 1], 0, tl.int64) tmp2 = tl.full([1, 1], 2, tl.int64) tmp3 = tmp0 < tmp2 tmp4 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * 0, [XBLOCK, RBLOCK]), tmp3, other=0.0) tmp5 = tmp0 >= tmp2 tl.full([1, 1], 3, tl.int64) tmp8 = tl.load(in_ptr0 + tl.broadcast_to(60 + r0, [XBLOCK, RBLOCK]), tmp5, other=0.0) tmp9 = tl.where(tmp3, tmp4, tmp8) tmp10 = tl.full([1, 1], 1, tl.int64) tmp12 = tmp10 < tmp2 tmp13 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * 1, [XBLOCK, RBLOCK]), tmp12, other=0.0) 
tmp14 = tmp10 >= tmp2 tmp16 = tl.load(in_ptr0 + tl.broadcast_to(60 + r0, [XBLOCK, RBLOCK]), tmp14, other=0.0) tmp17 = tl.where(tmp12, tmp13, tmp16) tmp18 = tmp9 + tmp17 tmp20 = tmp2 < tmp2 tmp21 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * 2, [XBLOCK, RBLOCK]), tmp20, other=0.0) tmp22 = tmp2 >= tmp2 tmp24 = tl.load(in_ptr0 + tl.broadcast_to(60 + r0, [XBLOCK, RBLOCK]), tmp22, other=0.0) tmp25 = tl.where(tmp20, tmp21, tmp24) tmp26 = tmp18 + tmp25 tmp27 = 3.0 tmp28 = tmp26 / tmp27 tmp30 = tmp29 * tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp28, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None) @triton.jit def triton_per_fused_linalg_vector_norm_12(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (56 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None) @triton.jit def triton_poi_fused_stack_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (8 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = tl.load(in_ptr0 + (12 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (16 + (-4 + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.load(in_ptr0 + (20 + (-4 + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tl.load(in_ptr0 + (24 + (-4 + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 + tmp23 tmp25 = tl.load(in_ptr0 + (28 + (-4 + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tmp26 / tmp12 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp19, tmp27, tmp28) tmp30 = tmp0 >= tmp17 tmp31 = tl.full([1], 12, tl.int64) tmp32 = tmp0 < tmp31 tmp33 = tmp30 & tmp32 tmp34 = tl.load(in_ptr0 + (32 + (-8 + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tl.load(in_ptr0 + (36 + (-8 + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp34 + tmp35 tmp37 = tl.load(in_ptr0 + (40 + (-8 + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tmp36 + tmp37 tmp39 = tl.load(in_ptr0 + (44 + (-8 + x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tmp38 + tmp39 tmp41 = tmp40 / tmp12 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp33, tmp41, tmp42) tmp44 = tmp0 >= tmp31 tl.full([1], 16, tl.int64) tmp47 = tl.load(in_ptr0 + (48 + 
(-12 + x0)), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tl.load(in_ptr0 + (52 + (-12 + x0)), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tmp47 + tmp48 tmp50 = tl.load(in_ptr0 + (56 + (-12 + x0)), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tmp49 + tmp50 tmp52 = 3.0 tmp53 = tmp51 / tmp52 tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype) tmp55 = tl.where(tmp44, tmp53, tmp54) tmp56 = tl.where(tmp33, tmp43, tmp55) tmp57 = tl.where(tmp19, tmp29, tmp56) tmp58 = tl.where(tmp4, tmp15, tmp57) tl.store(out_ptr0 + x0, tmp58, xmask) @triton.jit def triton_per_fused__log_softmax__log_softmax_backward_data_stack_14(in_ptr0, in_ptr1, in_ptr2, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp15 = tl.load(in_ptr1 + (4 + r0), None) tmp26 = tl.load(in_ptr1 + (8 + r0), None) tmp37 = tl.load(in_ptr1 + (12 + r0), None) tmp48 = tl.load(in_ptr1 + (16 + r0), None) tmp59 = tl.load(in_ptr1 + (20 + r0), None) tmp70 = tl.load(in_ptr1 + (24 + r0), None) tmp81 = tl.load(in_ptr1 + (28 + r0), None) tmp92 = tl.load(in_ptr1 + (32 + r0), None) tmp103 = tl.load(in_ptr1 + (36 + r0), None) tmp114 = tl.load(in_ptr1 + (40 + r0), None) tmp125 = tl.load(in_ptr1 + (44 + r0), None) tmp136 = tl.load(in_ptr1 + (48 + r0), None) tmp147 = tl.load(in_ptr1 + (52 + r0), None) tmp158 = tl.load(in_ptr1 + (56 + r0), None) tmp169 = tl.load(in_ptr1 + (60 + r0), None) tmp185 = tl.load(in_ptr1 + 0) tmp186 = tl.broadcast_to(tmp185, [XBLOCK, RBLOCK]) tmp199 = tl.load(in_ptr1 + 4) tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK]) tmp213 = tl.load(in_ptr1 + 8) tmp214 = tl.broadcast_to(tmp213, [XBLOCK, RBLOCK]) tmp226 = tl.load(in_ptr1 + 12) tmp227 = tl.broadcast_to(tmp226, [XBLOCK, RBLOCK]) tmp239 = tl.load(in_ptr1 + 17) tmp240 = tl.broadcast_to(tmp239, [XBLOCK, RBLOCK]) tmp249 = tl.load(in_ptr1 + 21) tmp250 = tl.broadcast_to(tmp249, [XBLOCK, RBLOCK]) tmp259 = tl.load(in_ptr1 + 25) tmp260 = tl.broadcast_to(tmp259, [XBLOCK, RBLOCK]) tmp269 = tl.load(in_ptr1 + 29) tmp270 = tl.broadcast_to(tmp269, [XBLOCK, RBLOCK]) tmp282 = tl.load(in_ptr1 + 34) tmp283 = tl.broadcast_to(tmp282, [XBLOCK, RBLOCK]) tmp292 = tl.load(in_ptr1 + 38) tmp293 = tl.broadcast_to(tmp292, [XBLOCK, RBLOCK]) tmp302 = tl.load(in_ptr1 + 42) tmp303 = tl.broadcast_to(tmp302, [XBLOCK, RBLOCK]) tmp312 = tl.load(in_ptr1 + 46) tmp313 = tl.broadcast_to(tmp312, [XBLOCK, RBLOCK]) tmp3 = tmp1 * tmp2 tmp6 = tmp3 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = triton_helpers.max2(tmp7, 1)[:, None] tmp10 = tmp6 - tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp1 * tmp15 tmp17 = tmp16 + tmp5 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = triton_helpers.max2(tmp18, 1)[:, None] tmp21 = tmp17 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp27 = tmp1 * tmp26 tmp28 = tmp27 + tmp5 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = 
triton_helpers.max2(tmp29, 1)[:, None] tmp32 = tmp28 - tmp31 tmp33 = tl_math.exp(tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp38 = tmp1 * tmp37 tmp39 = tmp38 + tmp5 tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK]) tmp42 = triton_helpers.max2(tmp40, 1)[:, None] tmp43 = tmp39 - tmp42 tmp44 = tl_math.exp(tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp49 = tmp1 * tmp48 tmp50 = tmp49 + tmp5 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = triton_helpers.max2(tmp51, 1)[:, None] tmp54 = tmp50 - tmp53 tmp55 = tl_math.exp(tmp54) tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.sum(tmp56, 1)[:, None] tmp60 = tmp1 * tmp59 tmp61 = tmp60 + tmp5 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = triton_helpers.max2(tmp62, 1)[:, None] tmp65 = tmp61 - tmp64 tmp66 = tl_math.exp(tmp65) tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = tl.sum(tmp67, 1)[:, None] tmp71 = tmp1 * tmp70 tmp72 = tmp71 + tmp5 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = triton_helpers.max2(tmp73, 1)[:, None] tmp76 = tmp72 - tmp75 tmp77 = tl_math.exp(tmp76) tmp78 = tl.broadcast_to(tmp77, [XBLOCK, RBLOCK]) tmp80 = tl.sum(tmp78, 1)[:, None] tmp82 = tmp1 * tmp81 tmp83 = tmp82 + tmp5 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = triton_helpers.max2(tmp84, 1)[:, None] tmp87 = tmp83 - tmp86 tmp88 = tl_math.exp(tmp87) tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = tl.sum(tmp89, 1)[:, None] tmp93 = tmp1 * tmp92 tmp94 = tmp93 + tmp5 tmp95 = tl.broadcast_to(tmp94, [XBLOCK, RBLOCK]) tmp97 = triton_helpers.max2(tmp95, 1)[:, None] tmp98 = tmp94 - tmp97 tmp99 = tl_math.exp(tmp98) tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = tl.sum(tmp100, 1)[:, None] tmp104 = tmp1 * tmp103 tmp105 = tmp104 + tmp5 tmp106 = tl.broadcast_to(tmp105, [XBLOCK, RBLOCK]) tmp108 = triton_helpers.max2(tmp106, 1)[:, None] tmp109 = tmp105 - tmp108 tmp110 = tl_math.exp(tmp109) tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK]) tmp113 = tl.sum(tmp111, 1)[:, None] tmp115 = tmp1 * tmp114 tmp116 = tmp115 + tmp5 tmp117 = tl.broadcast_to(tmp116, [XBLOCK, RBLOCK]) tmp119 = triton_helpers.max2(tmp117, 1)[:, None] tmp120 = tmp116 - tmp119 tmp121 = tl_math.exp(tmp120) tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK]) tmp124 = tl.sum(tmp122, 1)[:, None] tmp126 = tmp1 * tmp125 tmp127 = tmp126 + tmp5 tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK]) tmp130 = triton_helpers.max2(tmp128, 1)[:, None] tmp131 = tmp127 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tl.broadcast_to(tmp132, [XBLOCK, RBLOCK]) tmp135 = tl.sum(tmp133, 1)[:, None] tmp137 = tmp1 * tmp136 tmp138 = tmp137 + tmp5 tmp139 = tl.broadcast_to(tmp138, [XBLOCK, RBLOCK]) tmp141 = triton_helpers.max2(tmp139, 1)[:, None] tmp142 = tmp138 - tmp141 tmp143 = tl_math.exp(tmp142) tmp144 = tl.broadcast_to(tmp143, [XBLOCK, RBLOCK]) tmp146 = tl.sum(tmp144, 1)[:, None] tmp148 = tmp1 * tmp147 tmp149 = tmp148 + tmp5 tmp150 = tl.broadcast_to(tmp149, [XBLOCK, RBLOCK]) tmp152 = triton_helpers.max2(tmp150, 1)[:, None] tmp153 = tmp149 - tmp152 tmp154 = tl_math.exp(tmp153) tmp155 = tl.broadcast_to(tmp154, [XBLOCK, RBLOCK]) tmp157 = tl.sum(tmp155, 1)[:, None] tmp159 = tmp1 * tmp158 tmp160 = tmp159 + tmp5 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = triton_helpers.max2(tmp161, 1)[:, None] tmp164 = tmp160 - tmp163 tmp165 = tl_math.exp(tmp164) tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK]) tmp168 = tl.sum(tmp166, 1)[:, None] tmp170 = tmp1 * tmp169 tmp171 = tmp170 
+ tmp5 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = triton_helpers.max2(tmp172, 1)[:, None] tmp175 = tmp171 - tmp174 tmp176 = tl_math.exp(tmp175) tmp177 = tl.broadcast_to(tmp176, [XBLOCK, RBLOCK]) tmp179 = tl.sum(tmp177, 1)[:, None] tmp180 = r0 tl.full([1, 1], 0, tl.int64) tmp183 = tl.full([1, 1], 1, tl.int64) tmp184 = tmp180 < tmp183 tmp187 = tmp1 * tmp186 tmp188 = tmp187 + tmp5 tmp189 = tmp188 - tmp9 tmp190 = tl_math.log(tmp14) tmp191 = tmp189 - tmp190 tmp192 = -tmp191 tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype) tmp194 = tl.where(tmp184, tmp192, tmp193) tmp195 = tmp180 >= tmp183 tmp196 = tl.full([1, 1], 2, tl.int64) tmp197 = tmp180 < tmp196 tmp198 = tmp195 & tmp197 tmp201 = tmp1 * tmp200 tmp202 = tmp201 + tmp5 tmp203 = tmp202 - tmp20 tmp204 = tl_math.log(tmp25) tmp205 = tmp203 - tmp204 tmp206 = -tmp205 tmp207 = tl.full(tmp206.shape, 0.0, tmp206.dtype) tmp208 = tl.where(tmp198, tmp206, tmp207) tmp209 = tmp180 >= tmp196 tmp210 = tl.full([1, 1], 3, tl.int64) tmp211 = tmp180 < tmp210 tmp212 = tmp209 & tmp211 tmp215 = tmp1 * tmp214 tmp216 = tmp215 + tmp5 tmp217 = tmp216 - tmp31 tmp218 = tl_math.log(tmp36) tmp219 = tmp217 - tmp218 tmp220 = -tmp219 tmp221 = tl.full(tmp220.shape, 0.0, tmp220.dtype) tmp222 = tl.where(tmp212, tmp220, tmp221) tmp223 = tmp180 >= tmp210 tl.full([1, 1], 4, tl.int64) tmp228 = tmp1 * tmp227 tmp229 = tmp228 + tmp5 tmp230 = tmp229 - tmp42 tmp231 = tl_math.log(tmp47) tmp232 = tmp230 - tmp231 tmp233 = -tmp232 tmp234 = tl.full(tmp233.shape, 0.0, tmp233.dtype) tmp235 = tl.where(tmp223, tmp233, tmp234) tmp236 = tl.where(tmp212, tmp222, tmp235) tmp237 = tl.where(tmp198, tmp208, tmp236) tmp238 = tl.where(tmp184, tmp194, tmp237) tmp241 = tmp1 * tmp240 tmp242 = tmp241 + tmp5 tmp243 = tmp242 - tmp53 tmp244 = tl_math.log(tmp58) tmp245 = tmp243 - tmp244 tmp246 = -tmp245 tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype) tmp248 = tl.where(tmp184, tmp246, tmp247) tmp251 = tmp1 * tmp250 tmp252 = tmp251 + tmp5 tmp253 = tmp252 - tmp64 tmp254 = tl_math.log(tmp69) tmp255 = tmp253 - tmp254 tmp256 = -tmp255 tmp257 = tl.full(tmp256.shape, 0.0, tmp256.dtype) tmp258 = tl.where(tmp198, tmp256, tmp257) tmp261 = tmp1 * tmp260 tmp262 = tmp261 + tmp5 tmp263 = tmp262 - tmp75 tmp264 = tl_math.log(tmp80) tmp265 = tmp263 - tmp264 tmp266 = -tmp265 tmp267 = tl.full(tmp266.shape, 0.0, tmp266.dtype) tmp268 = tl.where(tmp212, tmp266, tmp267) tmp271 = tmp1 * tmp270 tmp272 = tmp271 + tmp5 tmp273 = tmp272 - tmp86 tmp274 = tl_math.log(tmp91) tmp275 = tmp273 - tmp274 tmp276 = -tmp275 tmp277 = tl.full(tmp276.shape, 0.0, tmp276.dtype) tmp278 = tl.where(tmp223, tmp276, tmp277) tmp279 = tl.where(tmp212, tmp268, tmp278) tmp280 = tl.where(tmp198, tmp258, tmp279) tmp281 = tl.where(tmp184, tmp248, tmp280) tmp284 = tmp1 * tmp283 tmp285 = tmp284 + tmp5 tmp286 = tmp285 - tmp97 tmp287 = tl_math.log(tmp102) tmp288 = tmp286 - tmp287 tmp289 = -tmp288 tmp290 = tl.full(tmp289.shape, 0.0, tmp289.dtype) tmp291 = tl.where(tmp184, tmp289, tmp290) tmp294 = tmp1 * tmp293 tmp295 = tmp294 + tmp5 tmp296 = tmp295 - tmp108 tmp297 = tl_math.log(tmp113) tmp298 = tmp296 - tmp297 tmp299 = -tmp298 tmp300 = tl.full(tmp299.shape, 0.0, tmp299.dtype) tmp301 = tl.where(tmp198, tmp299, tmp300) tmp304 = tmp1 * tmp303 tmp305 = tmp304 + tmp5 tmp306 = tmp305 - tmp119 tmp307 = tl_math.log(tmp124) tmp308 = tmp306 - tmp307 tmp309 = -tmp308 tmp310 = tl.full(tmp309.shape, 0.0, tmp309.dtype) tmp311 = tl.where(tmp212, tmp309, tmp310) tmp314 = tmp1 * tmp313 tmp315 = tmp314 + tmp5 tmp316 = tmp315 - tmp130 tmp317 = tl_math.log(tmp135) tmp318 = 
tmp316 - tmp317 tmp319 = -tmp318 tmp320 = tl.full(tmp319.shape, 0.0, tmp319.dtype) tmp321 = tl.where(tmp223, tmp319, tmp320) tmp322 = tl.where(tmp212, tmp311, tmp321) tmp323 = tl.where(tmp198, tmp301, tmp322) tmp324 = tl.where(tmp184, tmp291, tmp323) tmp325 = tl_math.log(tmp146) tmp326 = tmp142 - tmp325 tmp327 = tl_math.exp(tmp326) tmp328 = tl_math.log(tmp157) tmp329 = tmp153 - tmp328 tmp330 = tl_math.exp(tmp329) tmp331 = tl_math.log(tmp168) tmp332 = tmp164 - tmp331 tmp333 = tl_math.exp(tmp332) tmp334 = tl_math.log(tmp179) tmp335 = tmp175 - tmp334 tmp336 = tl_math.exp(tmp335) tl.store(out_ptr32 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp238, None) tl.store(out_ptr33 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp281, None) tl.store(out_ptr34 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp324, None) tl.store(out_ptr35 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp326, None) tl.store(out_ptr36 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp327, None) tl.store(out_ptr37 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp329, None) tl.store(out_ptr38 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp330, None) tl.store(out_ptr39 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp332, None) tl.store(out_ptr40 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp333, None) tl.store(out_ptr41 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp335, None) tl.store(out_ptr42 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp336, None) @triton.jit def triton_poi_fused_stack_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp14 = tl.load(in_ptr1 + 3) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp23 = tl.load(in_ptr2 + 3) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = -tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 2, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp16 = -tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 3, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp25 = -tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tl.full([1], 4, tl.int64) tmp33 = -tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp28, tmp33, tmp34) tmp36 = tl.where(tmp22, tmp27, tmp35) tmp37 = tl.where(tmp13, tmp18, tmp36) tmp38 = tl.where(tmp4, tmp9, tmp37) tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_per_fused_mean_16(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 16.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (), ()) assert_size_stride(primals_3, 
(), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16,), (1,), torch.float32) buf17 = empty_strided_cuda((16,), (1,), torch.float32) buf21 = empty_strided_cuda((16,), (1,), torch.float32) buf38 = empty_strided_cuda((16,), (1,), torch.float32) buf42 = empty_strided_cuda((16,), (1,), torch.float32) buf59 = empty_strided_cuda((16,), (1,), torch.float32) buf63 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(16)](primals_1, buf0, buf17, buf21, buf38, buf42, buf59, buf63, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((), (), torch.float32) buf7 = empty_strided_cuda((16,), (1,), torch.float32) buf3 = reinterpret_tensor(buf7, (4,), (1,), 0) triton_per_fused_cat_linalg_vector_norm_mean_1[grid(1)](primals_1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf23 = empty_strided_cuda((), (), torch.float32) buf4 = reinterpret_tensor(buf7, (4,), (1,), 4) buf14 = empty_strided_cuda((16,), (1,), torch.float32) buf11 = reinterpret_tensor(buf14, (4,), (1,), 4) buf28 = empty_strided_cuda((16,), (1,), torch.float32) buf24 = reinterpret_tensor(buf28, (4,), (1,), 4) triton_per_fused_cat_linalg_vector_norm_mean_stack_2[grid(1)](primals_1 , buf23, buf4, buf11, buf24, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf44 = empty_strided_cuda((), (), torch.float32) buf5 = reinterpret_tensor(buf7, (4,), (1,), 8) buf12 = reinterpret_tensor(buf14, (4,), (1,), 8) buf26 = reinterpret_tensor(buf28, (4,), (1,), 8) buf35 = empty_strided_cuda((16,), (1,), torch.float32) buf33 = reinterpret_tensor(buf35, (4,), (1,), 8) buf49 = empty_strided_cuda((16,), (1,), torch.float32) buf45 = reinterpret_tensor(buf49, (4,), (1,), 8) buf51 = empty_strided_cuda((), (), torch.float32) triton_per_fused_cat_linalg_vector_norm_mean_stack_3[grid(1)](primals_1 , buf44, buf5, buf12, buf26, buf33, buf45, buf51, 1, 4, XBLOCK= 1, num_warps=2, num_stages=1) buf65 = empty_strided_cuda((), (), torch.float32) buf6 = reinterpret_tensor(buf7, (4,), (1,), 12) buf13 = reinterpret_tensor(buf14, (4,), (1,), 12) buf27 = reinterpret_tensor(buf28, (4,), (1,), 12) buf34 = reinterpret_tensor(buf35, (4,), (1,), 12) buf48 = reinterpret_tensor(buf49, (4,), (1,), 12) buf56 = empty_strided_cuda((16,), (1,), torch.float32) buf55 = reinterpret_tensor(buf56, (4,), (1,), 12) buf70 = empty_strided_cuda((16,), (1,), torch.float32) buf66 = reinterpret_tensor(buf70, (4,), (1,), 12) triton_per_fused_cat_linalg_vector_norm_mean_stack_4[grid(1)](primals_1 , buf65, buf6, buf13, buf27, buf34, buf48, buf55, buf66, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf3 del buf4 del buf5 del buf6 buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 4), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((), (), torch.float32) triton_per_fused_linalg_vector_norm_5[grid(1)](primals_1, buf9, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf16 = empty_strided_cuda((), (), torch.float32) buf19 = empty_strided_cuda((), (), torch.float32) buf10 = reinterpret_tensor(buf14, (4,), (1,), 0) buf25 = reinterpret_tensor(buf28, (4,), (1,), 0) buf32 = reinterpret_tensor(buf35, (4,), (1,), 0) buf46 = reinterpret_tensor(buf49, (4,), (1,), 0) buf53 = reinterpret_tensor(buf56, (4,), (1,), 0) buf67 = reinterpret_tensor(buf70, 
(4,), (1,), 0) buf77 = empty_strided_cuda((16,), (1,), torch.float32) buf74 = reinterpret_tensor(buf77, (4,), (1,), 0) triton_per_fused_cat_linalg_vector_norm_mean_stack_6[grid(1)](primals_1 , buf16, buf19, buf10, buf25, buf32, buf46, buf53, buf67, buf74, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf10 del buf11 del buf12 del buf13 buf15 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 8), reinterpret_tensor(buf14, (4, 4), (1, 4), 0), out=buf15) buf18 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 12), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), out=buf18) buf84 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf20 = reinterpret_tensor(buf84, (4, 4), (4, 1), 0) triton_poi_fused_cat_7[grid(16)](buf1, buf2, buf0, buf8, buf9, buf7, buf15, buf16, buf14, buf18, buf19, buf17, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf14 del buf16 del buf17 del buf7 buf22 = buf8 del buf8 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 16), reinterpret_tensor(buf21, (4, 4), (1, 4), 0), out=buf22) del buf24 del buf25 del buf26 del buf27 buf29 = buf18 del buf18 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 20), reinterpret_tensor(buf28, (4, 4), (1, 4), 0), out=buf29) buf30 = buf9 del buf9 buf37 = buf2 del buf2 buf40 = buf19 del buf19 buf31 = reinterpret_tensor(buf35, (4,), (1,), 4) buf47 = reinterpret_tensor(buf49, (4,), (1,), 4) buf54 = reinterpret_tensor(buf56, (4,), (1,), 4) buf68 = reinterpret_tensor(buf70, (4,), (1,), 4) buf75 = reinterpret_tensor(buf77, (4,), (1,), 4) triton_per_fused_cat_linalg_vector_norm_mean_stack_8[grid(1)](primals_1 , buf30, buf37, buf40, buf31, buf47, buf54, buf68, buf75, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf31 del buf32 del buf33 del buf34 buf36 = buf15 del buf15 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 24), reinterpret_tensor(buf35, (4, 4), (1, 4), 0), out=buf36) buf39 = buf1 del buf1 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 28), reinterpret_tensor(buf38, (4, 4), (1, 4), 0), out=buf39) buf41 = reinterpret_tensor(buf84, (4, 4), (4, 1), 16) triton_poi_fused_cat_7[grid(16)](buf22, buf23, buf21, buf29, buf30, buf28, buf36, buf37, buf35, buf39, buf40, buf38, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf21 del buf23 del buf28 del buf30 del buf35 del buf38 buf43 = buf39 del buf39 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 32), reinterpret_tensor(buf42, (4, 4), (1, 4), 0), out=buf43) del buf45 del buf46 del buf47 del buf48 buf50 = buf36 del buf36 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 36), reinterpret_tensor(buf49, (4, 4), (1, 4), 0), out=buf50) buf58 = buf40 del buf40 buf61 = buf37 del buf37 buf52 = reinterpret_tensor(buf56, (4,), (1,), 8) buf69 = reinterpret_tensor(buf70, (4,), (1,), 8) buf76 = reinterpret_tensor(buf77, (4,), (1,), 8) triton_per_fused_cat_linalg_vector_norm_mean_stack_9[grid(1)](primals_1 , buf58, buf61, buf52, buf69, buf76, 1, 4, XBLOCK=1, num_warps= 2, num_stages=1) del buf52 del buf53 del buf54 del buf55 buf57 = buf29 del buf29 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 40), reinterpret_tensor(buf56, (4, 4), (1, 4), 0), out=buf57) buf60 = buf22 del buf22 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 44), reinterpret_tensor(buf59, (4, 4), (1, 4), 0), out=buf60) buf62 = reinterpret_tensor(buf84, (4, 4), (4, 1), 32) 
triton_poi_fused_cat_7[grid(16)](buf43, buf44, buf42, buf50, buf51, buf49, buf57, buf58, buf56, buf60, buf61, buf59, buf62, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf42 del buf44 del buf49 del buf56 buf64 = buf60 del buf60 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 48), reinterpret_tensor(buf63, (4, 4), (1, 4), 0), out=buf64) del buf66 del buf67 del buf68 del buf69 buf71 = buf57 del buf57 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 52), reinterpret_tensor(buf70, (4, 4), (1, 4), 0), out=buf71) buf72 = buf61 del buf61 triton_per_fused_linalg_vector_norm_10[grid(1)](primals_1, buf72, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf73 = reinterpret_tensor(buf77, (4,), (1,), 12) buf82 = buf58 del buf58 triton_per_fused_cat_linalg_vector_norm_mean_11[grid(1)](primals_1, buf73, buf82, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf73 del buf74 del buf75 del buf76 buf78 = buf50 del buf50 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 56), reinterpret_tensor(buf77, (4, 4), (1, 4), 0), out=buf78) buf79 = buf51 del buf51 triton_per_fused_linalg_vector_norm_12[grid(1)](primals_1, buf79, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf80 = buf59 del buf59 triton_poi_fused_stack_13[grid(16)](primals_1, buf80, 16, XBLOCK=16, num_warps=1, num_stages=1) buf81 = buf43 del buf43 extern_kernels.mm(reinterpret_tensor(primals_1, (1, 4), (1, 1), 60), reinterpret_tensor(buf80, (4, 4), (1, 4), 0), out=buf81) del primals_1 buf83 = reinterpret_tensor(buf84, (4, 4), (4, 1), 48) triton_poi_fused_cat_7[grid(16)](buf64, buf65, buf63, buf71, buf72, buf70, buf78, buf79, buf77, buf81, buf82, buf80, buf83, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf63 del buf65 del buf70 del buf72 del buf77 del buf79 buf125 = buf80 del buf80 buf93 = reinterpret_tensor(buf125, (4,), (1,), 0) buf102 = reinterpret_tensor(buf125, (4,), (1,), 4) buf111 = reinterpret_tensor(buf125, (4,), (1,), 8) buf114 = reinterpret_tensor(buf81, (4,), (1,), 0) del buf81 buf130 = reinterpret_tensor(buf78, (4,), (1,), 0) del buf78 buf117 = reinterpret_tensor(buf71, (4,), (1,), 0) del buf71 buf129 = reinterpret_tensor(buf64, (4,), (1,), 0) del buf64 buf120 = empty_strided_cuda((4,), (1,), torch.float32) buf128 = empty_strided_cuda((4,), (1,), torch.float32) buf123 = empty_strided_cuda((4,), (1,), torch.float32) buf127 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused__log_softmax__log_softmax_backward_data_stack_14[grid (1)](primals_2, buf84, primals_3, buf93, buf102, buf111, buf114, buf130, buf117, buf129, buf120, buf128, buf123, buf127, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf124 = reinterpret_tensor(buf125, (4,), (1,), 12) triton_poi_fused_stack_15[grid(4)](buf114, buf117, buf120, buf123, buf124, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf114 del buf117 del buf120 del buf123 buf126 = buf82 del buf82 buf131 = buf126 del buf126 triton_per_fused_mean_16[grid(1)](buf131, buf125, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf102 del buf111 del buf124 del buf125 del buf93 return buf131, primals_2, primals_3, buf84, buf127, buf128, buf129, buf130 class GE2ELossNew(nn.Module): def __init__(self, init_w=10.0, init_b=-5.0, loss_method='softmax'): """ Implementation of the Generalized End-to-End loss defined in https://arxiv.org/abs/1710.10467 [1] Accepts an input of size (N, M, D) where N is the number of speakers in the batch, M is the number of utterances per speaker, and D is the dimensionality of the embedding vector (e.g. 
d-vector)

        Args:
            - init_w (float): defines the initial value of w in Equation (5) of [1]
            - init_b (float): defines the initial value of b in Equation (5) of [1]
        """
        super(GE2ELossNew, self).__init__()
        self.w = nn.Parameter(torch.tensor(init_w))
        self.b = nn.Parameter(torch.tensor(init_b))
        self.loss_method = loss_method
        assert self.loss_method in ['softmax', 'contrast']
        if self.loss_method == 'softmax':
            self.embed_loss = self.embed_loss_softmax
        if self.loss_method == 'contrast':
            self.embed_loss = self.embed_loss_contrast

    def calc_new_centroids(self, dvecs, centroids, spkr, utt):
        """
        Calculates the new centroids excluding the reference utterance
        """
        excl = torch.cat((dvecs[spkr, :utt], dvecs[spkr, utt + 1:]))
        excl = torch.mean(excl, 0)
        new_centroids = []
        for i, centroid in enumerate(centroids):
            if i == spkr:
                new_centroids.append(excl)
            else:
                new_centroids.append(centroid)
        return torch.stack(new_centroids)

    def calc_cosine_sim(self, dvecs, centroids):
        """
        Make the cosine similarity matrix with dims (N, M, N)
        """
        cos_sim_matrix = []
        for spkr_idx, speaker in enumerate(dvecs):
            cs_row = []
            for utt_idx, utterance in enumerate(speaker):
                new_centroids = self.calc_new_centroids(dvecs, centroids,
                    spkr_idx, utt_idx)
                cs_row.append(torch.clamp(torch.mm(utterance.unsqueeze(1).
                    transpose(0, 1), new_centroids.transpose(0, 1)) / (
                    torch.norm(utterance) * torch.norm(new_centroids, dim=1)),
                    1e-06))
            cs_row = torch.cat(cs_row, dim=0)
            cos_sim_matrix.append(cs_row)
        return torch.stack(cos_sim_matrix)

    def embed_loss_softmax(self, dvecs, cos_sim_matrix):
        """
        Calculates the loss on each embedding $L(e_{ji})$ by taking softmax
        """
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                L_row.append(-F.log_softmax(cos_sim_matrix[j, i], 0)[j])
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)

    def embed_loss_contrast(self, dvecs, cos_sim_matrix):
        """
        Calculates the loss on each embedding $L(e_{ji})$ by contrast loss
        with closest centroid
        """
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i])
                excl_centroids_sigmoids = torch.cat((centroids_sigmoids[:j],
                    centroids_sigmoids[j + 1:]))
                L_row.append(1.0 - torch.sigmoid(cos_sim_matrix[j, i, j]) +
                    torch.max(excl_centroids_sigmoids))
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)

    def forward(self, input_0):
        primals_2 = self.w
        primals_3 = self.b
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
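The compiled wrapper keeps the eager module's interface: one (N, M, D) tensor of d-vectors in, a scalar loss out. A minimal forward-only sketch follows; it is not part of the original repo, and it assumes a CUDA device, since call() pins all buffers to cuda:0.

import torch

# Hypothetical smoke test (not from the source): N=4 speakers, M=4
# utterances, D=4-dim d-vectors, matching the (4, 4, 4) input shape
# that call() asserts above.
dvecs = torch.rand(4, 4, 4, device='cuda')
criterion = GE2ELossNew(init_w=10.0, init_b=-5.0, loss_method='softmax').cuda()
loss = criterion(dvecs)  # scalar: mean of the per-embedding softmax losses
print(loss.item())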
greenstar1151/pytorch-benchmark
GE2ELoss
false
10464
[ "BSD-3-Clause" ]
0
8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
https://github.com/greenstar1151/pytorch-benchmark/tree/8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
MyLinear
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/fl/cflw6zjzdk2wqtau7m6nsei5vavjfijzxhb37zaa3xp4yxpw5yb2.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   mul_1 => mul_1
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [bias], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   bias => mul
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, ), (1, ))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
        del primals_2
        buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
        # Topologically Sorted Source Nodes: [bias], Original ATen: [aten.mul]
        triton_poi_fused_mul_1.run(primals_1, buf1, 4, grid=grid(4), stream=stream0)
        del primals_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [bias, linear], Original ATen: [aten.mul, aten.addmm]
        extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del buf0
        del buf1
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate
    multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5,
                 use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size,
            input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul
        return F.linear(x, self.weight * self.w_mul, bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4}]
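The use_wscale branch is the equalized-learning-rate trick from ProGAN/StyleGAN: the stored weight is sampled with std 1/lrmul, and the He constant is folded into the runtime multiplier w_mul instead of the initializer, so the effective weight seen by F.linear still has He-scaled variance. A quick numerical sanity check of that identity follows; the sizes 512/256 and lrmul=0.01 are illustrative assumptions, not values from the source.

import torch
from math import sqrt

# Hypothetical check: with use_wscale=True, weight.std() ~ 1/lrmul at init,
# while (weight * w_mul).std() ~ he_std = gain / sqrt(input_size).
layer = MyLinear(input_size=512, output_size=256, use_wscale=True, lrmul=0.01)
he_std = sqrt(2.0) / sqrt(512)                    # = 0.0625
print(float(layer.weight.std()))                  # ~ 100.0  (= 1 / lrmul)
print(float((layer.weight * layer.w_mul).std()))  # ~ 0.0625 (= he_std)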
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4),
            (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del buf0
        del buf1
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class MyLinearNew(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate
    multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5,
                 use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size,
            input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
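Both Triton kernels above only multiply by 1.0 (lrmul defaults to 1) before the fused addmm, so the compiled path should reduce to the same computation as the eager F.linear path. A hypothetical parity check, assuming a CUDA device and that both MyLinear and MyLinearNew from the two listings above are importable in one script:

import torch

torch.manual_seed(0)
eager = MyLinear(4, 4).cuda()
compiled = MyLinearNew(4, 4).cuda()
with torch.no_grad():
    # Copy parameters so both modules start from identical weights.
    compiled.weight.copy_(eager.weight)
    compiled.bias.copy_(eager.bias)
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-6)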
justinpinkney/ganspace
MyLinear
false
10465
[ "Apache-2.0" ]
0
7dc76d1d2ddad21d946a7ceb375efe5d5316fb3f
https://github.com/justinpinkney/ganspace/tree/7dc76d1d2ddad21d946a7ceb375efe5d5316fb3f
VAE
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/g3/cg3el2gn3jo2uczn6kvxebxonhlsgf4gykdxpouwhsyjf55b5gdg.py
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
#   h1 => relu
# Graph fragment:
#   %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
#   %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 2000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 500
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/ki/ckicw4ebs4esmjq6arnmoylvxkanfgm3d6liitiqr6rwy5d63nvv.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
#   mul => mul
#   mul_1 => mul_1
#   std => exp
#   z => add
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, 0.5), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {})
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%addmm_1, %mul_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 20
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask)
    tmp2 = tl.load(in_ptr2 + (x0), xmask)
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp1 * tmp5
    tmp7 = tmp0 + tmp6
    tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/hb/chbjjrtszu6f3bhry7ireqcm3ie3twpz5s7g7owb3zuauqhiqcby.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
#   sigmoid => sigmoid
# Graph fragment:
#   %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 3136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 784
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (500, 784), (784, 1))
    assert_size_stride(primals_3, (500, ), (1, ))
    assert_size_stride(primals_4, (5, 500), (500, 1))
    assert_size_stride(primals_5, (5, ), (1, ))
    assert_size_stride(primals_6, (5, 500), (500, 1))
    assert_size_stride(primals_7, (5, ), (1, ))
    assert_size_stride(primals_8, (500, 5), (5, 1))
    assert_size_stride(primals_9, (500, ), (1, ))
    assert_size_stride(primals_10, (784, 500), (500, 1))
    assert_size_stride(primals_11, (784, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 500), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_0.run(buf1, primals_3, 2000, grid=grid(2000), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (500, 5), (1, 500), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
        # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (500, 5), (1, 500), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        # Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
        buf4 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
        triton_poi_fused_add_exp_mul_1.run(buf2, buf5, buf3, buf6, 20, grid=grid(20), stream=stream0)
        buf7 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (5, 500), (1, 5), 0), out=buf7)
        buf8 = buf7; del buf7  # reuse
        # Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu]
        triton_poi_fused_relu_0.run(buf8, primals_9, 2000, grid=grid(2000), stream=stream0)
        del primals_9
        buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (500, 784), (1, 500), 0), out=buf9)
        buf10 = buf9; del buf9  # reuse
        # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
        triton_poi_fused_sigmoid_2.run(buf10, primals_11, 3136, grid=grid(3136), stream=stream0)
        del primals_11
    return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((500, 784), (784, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((5, 500), (500, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((5, 500), (500, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((500, 5), (5, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((784, 500), (500, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class VAE(nn.Module):

    def __init__(self):
        super(VAE, self).__init__()
        self.fc1 = nn.Linear(784, 500)
        self.fc21 = nn.Linear(500, 5)
        self.fc22 = nn.Linear(500, 5)
        self.fc3 = nn.Linear(5, 500)
        self.fc4 = nn.Linear(500, 784)

    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return [[], {}]
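reparameterize above is the standard reparameterization trick: instead of sampling z ~ N(mu, sigma^2) directly, it draws eps ~ N(0, I) and computes z = mu + eps * exp(0.5 * logvar), which keeps the sample differentiable with respect to mu and logvar. A small standalone sketch of the same computation (the shapes here are illustrative assumptions):

import torch

mu = torch.zeros(4, 5, requires_grad=True)
logvar = torch.zeros(4, 5, requires_grad=True)

std = torch.exp(0.5 * logvar)   # sigma = exp(logvar / 2)
eps = torch.randn_like(std)     # the noise carries no learnable parameters
z = mu + eps * std              # differentiable w.r.t. mu and logvar

z.sum().backward()
print(mu.grad.shape, logvar.grad.shape)  # gradients reach both inputs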
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 500
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 20
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask)
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp1 * tmp5
    tmp7 = tmp0 + tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 3136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 784
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (500, 784), (784, 1))
    assert_size_stride(primals_3, (500,), (1,))
    assert_size_stride(primals_4, (5, 500), (500, 1))
    assert_size_stride(primals_5, (5,), (1,))
    assert_size_stride(primals_6, (5, 500), (500, 1))
    assert_size_stride(primals_7, (5,), (1,))
    assert_size_stride(primals_8, (500, 5), (5, 1))
    assert_size_stride(primals_9, (500,), (1,))
    assert_size_stride(primals_10, (784, 500), (500, 1))
    assert_size_stride(primals_11, (784,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 500), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(2000)](buf1, primals_3, 2000, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (500, 5), (1, 500), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (500, 5), (1, 500), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
        triton_poi_fused_add_exp_mul_1[grid(20)](buf2, buf5, buf3, buf6, 20, XBLOCK=32, num_warps=1, num_stages=1)
        buf7 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
        extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (5, 500), (1, 5), 0), out=buf7)
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_0[grid(2000)](buf8, primals_9, 2000, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_9
        buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
        extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (500, 784), (1, 500), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_11, 3136, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8,
        buf10, primals_10, primals_8, primals_6, primals_4)


class VAENew(nn.Module):

    def __init__(self):
        super(VAENew, self).__init__()
        self.fc1 = nn.Linear(784, 500)
        self.fc21 = nn.Linear(500, 5)
        self.fc22 = nn.Linear(500, 5)
        self.fc3 = nn.Linear(5, 500)
        self.fc4 = nn.Linear(500, 784)

    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc21.weight
        primals_5 = self.fc21.bias
        primals_6 = self.fc22.weight
        primals_7 = self.fc22.bias
        primals_8 = self.fc3.weight
        primals_9 = self.fc3.bias
        primals_10 = self.fc4.weight
        primals_11 = self.fc4.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0], output[1], output[2]
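The fused kernel triton_poi_fused_add_exp_mul_1 above computes out = in0 + in1 * exp(0.5 * in2) elementwise, i.e. the whole z = mu + eps * std step in one pass over the 20 elements. A plain-PyTorch reference for the same expression, shown only to illustrate what the kernel does (the shapes are arbitrary):

import torch

def fused_add_exp_mul_reference(mu, eps, logvar):
    # Mirrors the kernel body: tmp7 = tmp0 + tmp1 * exp(tmp2 * 0.5)
    return mu + eps * torch.exp(0.5 * logvar)

mu, eps, logvar = torch.randn(3, 4, 5).unbind(0)
print(fused_add_exp_mul_reference(mu, eps, logvar).shape)  # torch.Size([4, 5])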
mcabbott/Avalon.jl
VAE
false
10466
[ "MIT" ]
0
6885bcc8204952a2396e762ce51432d9969c4138
https://github.com/mcabbott/Avalon.jl/tree/6885bcc8204952a2396e762ce51432d9969c4138
ContinuousNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/y7/cy7463cf27qpra2f6ndigmu6ve4q6o3cbvgetgqehejtevv6yfa5.py
# Topologically Sorted Source Nodes: [a1], Original ATen: [aten.hardtanh, aten.hardtanh_backward]
# Source node to ATen node mapping:
#   a1 => clamp_max, clamp_min
# Graph fragment:
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {})
#   %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_1, 0), kwargs = {})
#   %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 6), kwargs = {})
#   %bitwise_or_1 : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le_1, %ge_1), kwargs = {})
triton_poi_fused_hardtanh_hardtanh_backward_0 = async_compile.triton('triton_poi_fused_hardtanh_hardtanh_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_hardtanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_hardtanh_hardtanh_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 12800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 200
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + (x2), tmp6, xmask)
    tl.store(out_ptr1 + (x2), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/oz/coz2vvm7j43dgp2scg63a7xyihnzn7xcwkbiwytoo3i3naiahfwp.py
# Topologically Sorted Source Nodes: [tanh, mu], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
#   mu => mul
#   tanh => tanh
# Graph fragment:
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 2), kwargs = {})
triton_poi_fused_mul_tanh_1 = async_compile.triton('triton_poi_fused_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 2.0
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/az/cazly2utbmgqy4jzdtmpz56naibvqlqicln4ij6ptdvbf4ytmvkc.py
# Topologically Sorted Source Nodes: [softplus, sigma], Original ATen: [aten.softplus, aten.add]
# Source node to ATen node mapping:
#   sigma => add
#   softplus => exp, gt, log1p, where
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_5,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_5, 20), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_5, %log1p), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 0.001), kwargs = {})
triton_poi_fused_add_softplus_2 = async_compile.triton('triton_poi_fused_add_softplus_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_softplus_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = 0.001
    tmp7 = tmp5 + tmp6
    tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/zv/czv7olwo7h6ucl4yfkx5wdow4pryybtkxjpmco7mpv23b5hl3wfb.py
# Topologically Sorted Source Nodes: [c1], Original ATen: [aten.hardtanh, aten.hardtanh_backward]
# Source node to ATen node mapping:
#   c1 => clamp_max_1, clamp_min_1
# Graph fragment:
#   %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_7, 0), kwargs = {})
#   %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 6), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_7, 0), kwargs = {})
#   %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_7, 6), kwargs = {})
#   %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_hardtanh_hardtanh_backward_3 = async_compile.triton('triton_poi_fused_hardtanh_hardtanh_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_hardtanh_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_hardtanh_hardtanh_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 100
    x3 = (xindex // 1600)
    x5 = xindex % 1600
    tmp0 = tl.load(in_ptr0 + (x4), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + (x4), tmp6, xmask)
    tl.store(out_ptr1 + (x5 + (1664*x3)), tmp9, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
    args.clear()
    assert_size_stride(primals_1, (200, 4), (4, 1))
    assert_size_stride(primals_2, (200, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 200), (200, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 200), (200, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (100, 4), (4, 1))
    assert_size_stride(primals_9, (100, ), (1, ))
    assert_size_stride(primals_10, (1, 100), (100, 1))
    assert_size_stride(primals_11, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
        # Topologically Sorted Source Nodes: [a1], Original ATen: [aten.hardtanh, aten.hardtanh_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_hardtanh_hardtanh_backward_0.run(buf0, primals_2, buf1, buf11, 12800, grid=grid(12800), stream=stream0)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [tanh, mu], Original ATen: [aten.tanh, aten.mul]
        triton_poi_fused_mul_tanh_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softplus, sigma], Original ATen: [aten.softplus, aten.add]
        triton_poi_fused_add_softplus_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
        buf6 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 100), (1, 4), 0), out=buf6)
        del primals_8
        buf7 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1), torch.float32)
        buf10 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
        # Topologically Sorted Source Nodes: [c1], Original ATen: [aten.hardtanh, aten.hardtanh_backward]
        triton_poi_fused_hardtanh_hardtanh_backward_3.run(buf6, primals_9, buf7, buf10, 6400, grid=grid(6400), stream=stream0)
        del buf6
        del primals_9
        buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [values], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 100), (100, 1), 0), reinterpret_tensor(primals_10, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf9)
        del primals_11
    return (buf3, buf5, reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 200), (200, 1), 0), buf2, buf4, reinterpret_tensor(buf7, (64, 100), (100, 1), 0), primals_10, buf10, primals_6, primals_4, buf11, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


def set_init(layers):
    for layer in layers:
        nn.init.normal_(layer.weight, mean=0.0, std=0.1)
        nn.init.constant_(layer.bias, 0.0)


class ContinuousNet(nn.Module):

    def __init__(self, s_dim, a_dim):
        super(ContinuousNet, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        self.a1 = nn.Linear(s_dim, 200)
        self.mu = nn.Linear(200, a_dim)
        self.sigma = nn.Linear(200, a_dim)
        self.c1 = nn.Linear(s_dim, 100)
        self.v = nn.Linear(100, 1)
        set_init([self.a1, self.mu, self.sigma, self.c1, self.v])
        self.distribution = torch.distributions.Normal

    def forward(self, x):
        a1 = F.relu6(self.a1(x))
        mu = 2 * F.tanh(self.mu(a1))
        sigma = F.softplus(self.sigma(a1)) + 0.001
        c1 = F.relu6(self.c1(x))
        values = self.v(c1)
        return mu, sigma, values

    def choose_action(self, s):
        self.training = False
        mu, sigma, _ = self.forward(s)
        m = self.distribution(mu.view(1).data, sigma.view(1).data)
        return m.sample().numpy()

    def loss_func(self, s, a, v_t):
        self.train()
        mu, sigma, values = self.forward(s)
        td = v_t - values
        c_loss = td.pow(2)
        m = self.distribution(mu, sigma)
        log_prob = m.log_prob(a)
        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(m.scale)
        exp_v = log_prob * td.detach() + 0.005 * entropy
        a_loss = -exp_v
        total_loss = (a_loss + c_loss).mean()
        return total_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'s_dim': 4, 'a_dim': 4}]
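The closed form used for entropy in loss_func, 0.5 + 0.5 * log(2 * pi) + log(sigma), is the differential entropy of a univariate Normal. A quick check against torch.distributions (the sigma values below are illustrative only):

import math
import torch

sigma = torch.tensor([0.5, 1.0, 2.0])
closed_form = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(sigma)
reference = torch.distributions.Normal(torch.zeros_like(sigma), sigma).entropy()
torch.testing.assert_close(closed_form, reference)  # the two formulas agree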
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_hardtanh_hardtanh_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 12800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 200
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 2.0
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_add_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = 0.001
    tmp7 = tmp5 + tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


@triton.jit
def triton_poi_fused_hardtanh_hardtanh_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 100
    x3 = xindex // 1600
    x5 = xindex % 1600
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 <= tmp3
    tmp8 = tmp2 >= tmp5
    tmp9 = tmp7 | tmp8
    tl.store(out_ptr0 + x4, tmp6, xmask)
    tl.store(out_ptr1 + (x5 + 1664 * x3), tmp9, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (200, 4), (4, 1))
    assert_size_stride(primals_2, (200,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 200), (200, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 200), (200, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (100, 4), (4, 1))
    assert_size_stride(primals_9, (100,), (1,))
    assert_size_stride(primals_10, (1, 100), (100, 1))
    assert_size_stride(primals_11, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_hardtanh_hardtanh_backward_0[grid(12800)](buf0, primals_2, buf1, buf11, 12800, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_tanh_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_softplus_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 100), (1, 4), 0), out=buf6)
        del primals_8
        buf7 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1), torch.float32)
        buf10 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
        triton_poi_fused_hardtanh_hardtanh_backward_3[grid(6400)](buf6, primals_9, buf7, buf10, 6400, XBLOCK=256, num_warps=4, num_stages=1)
        del buf6
        del primals_9
        buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 100), (100, 1), 0), reinterpret_tensor(primals_10, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf9)
        del primals_11
    return (buf3, buf5, reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 200), (200, 1), 0),
        buf2, buf4, reinterpret_tensor(buf7, (64, 100), (100, 1), 0),
        primals_10, buf10, primals_6, primals_4, buf11)


def set_init(layers):
    for layer in layers:
        nn.init.normal_(layer.weight, mean=0.0, std=0.1)
        nn.init.constant_(layer.bias, 0.0)


class ContinuousNetNew(nn.Module):

    def __init__(self, s_dim, a_dim):
        super(ContinuousNetNew, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        self.a1 = nn.Linear(s_dim, 200)
        self.mu = nn.Linear(200, a_dim)
        self.sigma = nn.Linear(200, a_dim)
        self.c1 = nn.Linear(s_dim, 100)
        self.v = nn.Linear(100, 1)
        set_init([self.a1, self.mu, self.sigma, self.c1, self.v])
        self.distribution = torch.distributions.Normal

    def choose_action(self, s):
        self.training = False
        mu, sigma, _ = self.forward(s)
        m = self.distribution(mu.view(1).data, sigma.view(1).data)
        return m.sample().numpy()

    def loss_func(self, s, a, v_t):
        self.train()
        mu, sigma, values = self.forward(s)
        td = v_t - values
        c_loss = td.pow(2)
        m = self.distribution(mu, sigma)
        log_prob = m.log_prob(a)
        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(m.scale)
        exp_v = log_prob * td.detach() + 0.005 * entropy
        a_loss = -exp_v
        total_loss = (a_loss + c_loss).mean()
        return total_loss

    def forward(self, input_0):
        primals_1 = self.a1.weight
        primals_2 = self.a1.bias
        primals_4 = self.mu.weight
        primals_5 = self.mu.bias
        primals_6 = self.sigma.weight
        primals_7 = self.sigma.bias
        primals_8 = self.c1.weight
        primals_9 = self.c1.bias
        primals_10 = self.v.weight
        primals_11 = self.v.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0], output[1], output[2]
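The pointwise kernels above can also be launched standalone; triton_poi_fused_mul_tanh_1, for instance, computes 2 * tanh(x) over exactly 256 elements (xnumel is hard-coded inside the kernel). A minimal launch sketch, assuming a CUDA device and that the kernel and grid imports from the file above are in scope:

import torch

x = torch.randn(256, device='cuda')
out = torch.empty_like(x)
# The kernel fixes xnumel = 256 internally, so the input size must match.
triton_poi_fused_mul_tanh_1[grid(256)](x, out, 256, XBLOCK=256, num_warps=4, num_stages=1)
torch.testing.assert_close(out, 2 * torch.tanh(x), rtol=1e-4, atol=1e-4)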
lws803/pytorch-A3C
ContinuousNet
false
10467
[ "MIT" ]
0
944e7f42a8fa54b7d6efbe169d8a3467b20a0f7f
https://github.com/lws803/pytorch-A3C/tree/944e7f42a8fa54b7d6efbe169d8a3467b20a0f7f
FullAttention
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [QK], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   QK => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x2 = (xindex // 16) % 4
    x3 = (xindex // 64)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
    tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/ri/cricgdtr5c24l63g746gjtdd45qor3pkzmi7qmyygyd24ejrijb7.py
# Topologically Sorted Source Nodes: [QK], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   QK => clone_1
# Graph fragment:
#   %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64, 4],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 16
    y1 = (yindex // 16)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/ya/cyacjmbos4hqjbfqrids3ws7umxa2yjx6uocj6nk4q5qb3ifqsdu.py
# Topologically Sorted Source Nodes: [mul, A], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
#   A => amax, clone_2, exp, sub
#   mul => mul
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {})
#   %clone_2 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%mul,), kwargs = {memory_format: torch.contiguous_format})
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_2, [2], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_2, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_mul_2 = async_compile.triton('triton_poi_fused__softmax_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = tl_math.exp(tmp14)
    tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/vb/cvb3oguu73uopye5dab6pedxgpbqv7hv76dtm7n5cat2xumuixgo.py
# Topologically Sorted Source Nodes: [queried_values], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   queried_values => clone_3
# Graph fragment:
#   %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [QK], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [QK], Original ATen: [aten.clone]
        triton_poi_fused_clone_1.run(arg0_1, buf1, 64, 4, grid=grid(64, 4), stream=stream0)
        del arg0_1
        buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [QK], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 1, 16), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [mul, A], Original ATen: [aten.mul, aten._softmax]
        triton_poi_fused__softmax_mul_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf2  # reuse
        # Topologically Sorted Source Nodes: [queried_values], Original ATen: [aten.clone]
        triton_poi_fused_clone_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [queried_values], Original ATen: [aten.clone]
        triton_poi_fused_clone_0.run(arg2_1, buf5, 256, grid=grid(256), stream=stream0)
        del arg2_1
        buf6 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [queried_values], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
        del buf4
        buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16,
4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Dropout class FullAttention(Module): def __init__(self, use_dropout=False, attention_dropout=0.1): super().__init__() self.use_dropout = use_dropout self.dropout = Dropout(attention_dropout) def forward(self, queries, keys, values, q_mask=None, kv_mask=None): """ Multi-head scaled dot-product attention, a.k.a full attention. Args: queries: [N, L, H, D] keys: [N, S, H, D] values: [N, S, H, D] q_mask: [N, L] kv_mask: [N, S] Returns: queried_values: (N, L, H, D) """ QK = torch.einsum('nlhd,nshd->nlsh', queries, keys) if kv_mask is not None: QK.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :, None]), float('-inf')) softmax_temp = 1.0 / queries.size(3) ** 0.5 A = torch.softmax(softmax_temp * QK, dim=2) if self.use_dropout: A = self.dropout(A) queried_values = torch.einsum('nlsh,nshd->nlhd', A, values) return queried_values.contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
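A minimal sketch (my addition, not part of the dataset record; assumes the FullAttention class above is in scope): the einsum formulation in the forward pass is equivalent to standard scaled dot-product attention computed with permutes and matmuls, with shapes following the docstring ([N, L, H, D]).

import torch

N, L, S, H, D = 2, 5, 7, 4, 8
q = torch.rand(N, L, H, D)
k = torch.rand(N, S, H, D)
v = torch.rand(N, S, H, D)

out = FullAttention()(q, k, v)

# Reference: move heads next to batch, then do plain softmax attention.
qh = q.permute(0, 2, 1, 3)                        # [N, H, L, D]
kh = k.permute(0, 2, 1, 3)                        # [N, H, S, D]
vh = v.permute(0, 2, 1, 3)                        # [N, H, S, D]
scores = qh @ kh.transpose(-2, -1) / D ** 0.5     # [N, H, L, S]
ref = (scores.softmax(dim=-1) @ vh).permute(0, 2, 1, 3)  # [N, L, H, D]

assert torch.allclose(out, ref, atol=1e-5)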
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn import Dropout assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) 
triton_poi_fused_clone_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_1[grid(64, 4)](arg0_1, buf1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 1, 16), 0) del buf1 triton_poi_fused__softmax_mul_2[grid(256)](buf2, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf2 triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(256)](arg2_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 buf6 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_clone_0[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 return buf7, class FullAttentionNew(Module): def __init__(self, use_dropout=False, attention_dropout=0.1): super().__init__() self.use_dropout = use_dropout self.dropout = Dropout(attention_dropout) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
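Hypothetical smoke test (my addition, not from the repo): the generated kernels hard-code contiguous (4, 4, 4, 4) CUDA inputs via assert_size_stride, so the eager/compiled comparison must use exactly that shape and is guarded on device availability.

import torch

if torch.cuda.is_available():
    # FullAttention and FullAttentionNew as defined in this record.
    q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    eager = FullAttention()(q, k, v)
    compiled = FullAttentionNew()(q, k, v)
    assert torch.allclose(eager, compiled, atol=1e-5)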
lee-vius/LoFTR
FullAttention
false
10468
[ "Apache-2.0" ]
0
dd9add373a20696fb6f020f4fda38bca7a91cdd9
https://github.com/lee-vius/LoFTR/tree/dd9add373a20696fb6f020f4fda38bca7a91cdd9
LRN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/gv/cgvon7iygyhjm2kmwtta5t2r3z2byfrr4qwpcmym3h4h6yzxvtvp.py # Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # div => pow_1 # div_1 => avg_pool2d # div_2 => pow_2 # mul => mul # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [1, 1], [1, 1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 0.0001), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 2.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_add_avg_pool2d_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0001 tmp5 = tmp3 * tmp4 tmp6 = 2.0 tmp7 = tmp5 + tmp6 tmp8 = 0.75 tmp9 = libdevice.pow(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool2d_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LRN(nn.Module): def __init__(self, local_size=1, alpha=0.0001, beta=0.75, ACROSS_CHANNELS=False): super(LRN, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if self.ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, x): if self.ACROSS_CHANNELS: div = x.pow(2).unsqueeze(1) div = self.average(div).squeeze(1) div = div.mul(self.alpha).add(2.0).pow(self.beta) else: div = x.pow(2) div = self.average(div) div = div.mul(self.alpha).add(2.0).pow(self.beta) x = x.div(div) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
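A minimal sketch (my addition; assumes the LRN class above is in scope): with the default local_size=1, the AvgPool2d has kernel 1 and stride 1, i.e. it is an identity, so the whole module reduces to the elementwise formula y = x / (alpha * x**2 + 2.0) ** beta. That is exactly the expression the fused Triton kernel in this record computes.

import torch

x = torch.rand(4, 4, 4, 4)
lrn = LRN()  # local_size=1, alpha=1e-4, beta=0.75
ref = x / (1e-4 * x.pow(2) + 2.0).pow(0.75)
assert torch.allclose(lrn(x), ref, atol=1e-6)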
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0001 tmp5 = tmp3 * tmp4 tmp6 = 2.0 tmp7 = tmp5 + tmp6 tmp8 = 0.75 tmp9 = libdevice.pow(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_avg_pool2d_div_mul_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LRNNew(nn.Module): def __init__(self, local_size=1, alpha=0.0001, beta=0.75, ACROSS_CHANNELS=False): super(LRNNew, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if self.ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
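Hypothetical smoke test (my addition, not from the repo): the fused kernel assumes a contiguous (4, 4, 4, 4) CUDA input, so the eager/compiled comparison is guarded on device availability.

import torch

if torch.cuda.is_available():
    # LRN and LRNNew as defined in this record; neither holds parameters.
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(LRN()(x), LRNNew()(x), atol=1e-6)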
melster1010/VIAME
LRN
false
10469
[ "BSD-3-Clause" ]
0
0062265088aae65effbfcd130bfb874c343c785f
https://github.com/melster1010/VIAME/tree/0062265088aae65effbfcd130bfb874c343c785f
LinearAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/og/cogo4pfcy56l7ulvsx6und2vmih3qojwpvkiazyzh7hsptmp4vos.py # Topologically Sorted Source Nodes: [elu_1, K, sum_1], Original ATen: [aten.elu, aten.add, aten.sum] # Source node to ATen node mapping: # K => add_1 # elu_1 => expm1_1, gt_1, mul_3, mul_4, mul_5, where_1 # sum_1 => sum_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg1_1, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_4,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, 1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {}) triton_poi_fused_add_elu_sum_0 = async_compile.triton('triton_poi_fused_add_elu_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_elu_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_elu_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp25 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tmp10 = tmp9 > tmp1 tmp11 = tmp9 * tmp3 tmp12 = libdevice.expm1(tmp11) tmp13 = tmp12 * tmp3 tmp14 = tl.where(tmp10, tmp11, tmp13) tmp15 = tmp14 + tmp3 tmp16 = tmp8 + tmp15 tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp3 tmp22 = tl.where(tmp18, tmp19, tmp21) tmp23 = tmp22 + tmp3 tmp24 = tmp16 + tmp23 tmp26 = tmp25 > tmp1 tmp27 = tmp25 * tmp3 tmp28 = libdevice.expm1(tmp27) tmp29 = tmp28 * tmp3 tmp30 = tl.where(tmp26, tmp27, tmp29) tmp31 = tmp30 + tmp3 tmp32 = tmp24 + tmp31 tl.store(out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ui/cuicp7lcen5tosk4olu3x3oaa7yz6cvucka5zwp4hr2qbvjrvvxh.py # Topologically Sorted Source Nodes: [einsum_1, einsum_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum_1 => clone_2 # einsum_2 => clone_3 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_13,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp8, xmask) tl.store(out_ptr1 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ds/cdscs66baluy2qjp2egcmspwpssho6a6gwzskgkoo4xnrapvrluw.py # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.clone] # Source node to ATen node mapping: # KV => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp8, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/mf/cmffnm5icoqilxjyfgljttokrgpbt5d2oz7xydx6dxdlysmdthuj.py # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.clone] # Source node to ATen node mapping: # KV => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kd/ckd25s7vujyv5hxcsgrxyfy25zilz32mxdkf3yx2dqm3ovvchilg.py # Topologically Sorted Source Nodes: [queried_values, contiguous], Original ATen: [aten.mul, aten.clone] # Source node to ATen node mapping: # contiguous => clone_4 # queried_values => mul_8 # Graph fragment: # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 4), kwargs = {}) # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%mul_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_mul_4 = async_compile.triton('triton_poi_fused_clone_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x5 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (x5), xmask) tmp1 = 1e-06 tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp2 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp9 = 4.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [elu_1, K, sum_1], Original ATen: [aten.elu, aten.add, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_add_elu_sum_0.run(arg1_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum_1, einsum_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(arg0_1, buf1, buf6, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), out=buf2) del buf0 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(arg1_1, buf3, 16, 16, grid=grid(16, 16), stream=stream0) del arg1_1 buf4 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(arg2_1, buf4, 256, grid=grid(256), stream=stream0) del arg2_1 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) del buf3 buf7 = 
reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [einsum_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), buf5, out=buf7) del buf5 buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [queried_values, contiguous], Original ATen: [aten.mul, aten.clone] triton_poi_fused_clone_mul_4.run(buf2, buf7, buf8, 256, grid=grid(256), stream=stream0) del buf2 del buf7 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch def elu_feature_map(x): return torch.nn.functional.elu(x) + 1 class LinearAttention(Module): def __init__(self, eps=1e-06): super().__init__() self.feature_map = elu_feature_map self.eps = eps def forward(self, queries, keys, values, q_mask=None, kv_mask=None): """ Multi-Head linear attention proposed in "Transformers are RNNs" Args: queries: [N, L, H, D] keys: [N, S, H, D] values: [N, S, H, D] q_mask: [N, L] kv_mask: [N, S] Returns: queried_values: (N, L, H, D) """ Q = self.feature_map(queries) K = self.feature_map(keys) if q_mask is not None: Q = Q * q_mask[:, :, None, None] if kv_mask is not None: K = K * kv_mask[:, :, None, None] values = values * kv_mask[:, :, None, None] v_length = values.size(1) values = values / v_length KV = torch.einsum('nshd,nshv->nhdv', K, values) Z = 1 / (torch.einsum('nlhd,nhd->nlh', Q, K.sum(dim=1)) + self.eps) queried_values = torch.einsum('nlhd,nhdv,nlh->nlhv', Q, KV, Z ) * v_length return queried_values.contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
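A minimal sketch (my addition; assumes no masks and that LinearAttention and elu_feature_map above are in scope): the KV-first einsum chain gives the same result as materializing the full L x S weight matrix and normalizing it, which is the point of linear attention -- per-head state of size D x V instead of S weights per query.

import torch

N, L, S, H, D = 2, 5, 7, 3, 4
q = torch.rand(N, L, H, D)
k = torch.rand(N, S, H, D)
v = torch.rand(N, S, H, D)

out = LinearAttention()(q, k, v)

# Quadratic reference: form all L x S unnormalized weights, then normalize.
Q, K = elu_feature_map(q), elu_feature_map(k)
A = torch.einsum('nlhd,nshd->nlsh', Q, K)                  # [N, L, S, H]
ref = torch.einsum('nlsh,nshv->nlhv', A, v) / (A.sum(dim=2) + 1e-06)[..., None]
assert torch.allclose(out, ref, atol=1e-5)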
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_elu_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp25 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tmp10 = tmp9 > tmp1 tmp11 = tmp9 * tmp3 tmp12 = libdevice.expm1(tmp11) tmp13 = tmp12 * tmp3 tmp14 = tl.where(tmp10, tmp11, tmp13) tmp15 = tmp14 + tmp3 tmp16 = tmp8 + tmp15 tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp3 tmp22 = tl.where(tmp18, tmp19, tmp21) tmp23 = tmp22 + tmp3 tmp24 = tmp16 + tmp23 tmp26 = tmp25 > tmp1 tmp27 = tmp25 * tmp3 tmp28 = libdevice.expm1(tmp27) tmp29 = tmp28 * tmp3 tmp30 = tl.where(tmp26, tmp27, tmp29) tmp31 = tmp30 + tmp3 tmp32 = tmp24 + tmp31 tl.store(out_ptr0 + x2, tmp32, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask) tl.store(out_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + x5, xmask) tmp1 = 1e-06 tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp3 / tmp2 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp9 = 4.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp10, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_elu_sum_0[grid(64)](arg1_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_1[grid(256)](arg0_1, buf1, buf6, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), out=buf2) del buf0 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_2[grid(16, 16)](arg1_1, buf3, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg1_1 buf4 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_3[grid(256)](arg2_1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) del buf3 buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), buf5, out=buf7) del buf5 buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_mul_4[grid(256)](buf2, buf7, buf8, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del buf2 del buf7 return buf8, def elu_feature_map(x): return torch.nn.functional.elu(x) + 1 class LinearAttentionNew(Module): def __init__(self, eps=1e-06): super().__init__() self.feature_map = elu_feature_map self.eps = eps def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
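Hypothetical check (my addition, not from the repo): the compiled kernels bake the (4, 4, 4, 4) shape into constants -- e.g. tmp1 = 0.25 in triton_poi_fused_clone_3 is 1 / v_length and tmp9 = 4.0 in triton_poi_fused_clone_mul_4 undoes it -- so only that exact shape is valid, and only on CUDA.

import torch

if torch.cuda.is_available():
    q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    assert torch.allclose(LinearAttention()(q, k, v),
                          LinearAttentionNew()(q, k, v), atol=1e-5)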
lee-vius/LoFTR
LinearAttention
false
10470
[ "Apache-2.0" ]
0
dd9add373a20696fb6f020f4fda38bca7a91cdd9
https://github.com/lee-vius/LoFTR/tree/dd9add373a20696fb6f020f4fda38bca7a91cdd9
Descendant
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yk/cykarwk3qy3pmem22bdbqxnd35nfbs5r3u5357r5uoderrncltwa.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 20 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/pr/cprkqcgqlxorbpnularzqdmik33w6yjgt5sc4pc7na35izz6l3qp.py # Topologically Sorted Source Nodes: [max_pool2d, conv_out], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv_out => relu # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 72000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x3 = (xindex // 30) x2 = (xindex // 18000) x4 = xindex % 18000 x5 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = 
tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x4 + (18048*x2)), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp18, xmask) tl.store(out_ptr2 + (x4 + (18048*x2)), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 288000, grid=grid(288000), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.int8) buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1), torch.float32) buf4 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.bool) # Topologically Sorted Source Nodes: [max_pool2d, conv_out], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1.run(buf1, buf2, buf3, buf4, 72000, grid=grid(72000), stream=stream0) return (buf3, primals_1, primals_3, buf1, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class Descendant(nn.Module): """Descendant descendantEncoder model for ADDA.""" def __init__(self): """Init Descendant descendantEncoder.""" super(Descendant, self).__init__() self.restored = False self.conv1 = nn.Conv2d(1, 20, kernel_size=5) self.pool1 = nn.MaxPool2d(kernel_size=2) def forward(self, input): """Forward the Descendant.""" conv_out = F.relu(self.pool1(self.conv1(input))) out = conv_out return out def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
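A minimal sketch (my addition; assumes the Descendant class above is in scope) of the shape arithmetic the compiled buffers encode: Conv2d(1, 20, kernel_size=5) with no padding maps 64x64 to 60x60, and MaxPool2d(2) halves that to 30x30, matching the (4, 20, 30, 30) buffers in the generated code.

import torch

net = Descendant()
out = net(torch.rand(4, 1, 64, 64))
assert out.shape == (4, 20, 30, 30)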
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 72000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x3 = xindex // 30 x2 = xindex // 18000 x4 = xindex % 18000 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x4 + 18048 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) tl.store(out_ptr2 + (x4 + 18048 * x2), tmp20, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(288000)](buf1, primals_2, 288000, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.int8) buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1), torch.float32) buf4 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_1[grid (72000)](buf1, buf2, buf3, buf4, 72000, XBLOCK=512, num_warps=8, num_stages=1) return buf3, primals_1, primals_3, buf1, buf2, buf4 class DescendantNew(nn.Module): """Descendant descendantEncoder model for ADDA.""" def __init__(self): """Init Descendant descendantEncoder.""" super(DescendantNew, 
self).__init__() self.restored = False self.conv1 = nn.Conv2d(1, 20, kernel_size=5) self.pool1 = nn.MaxPool2d(kernel_size=2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
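The compiled wrapper should be numerically equivalent to the eager module: both apply ReLU after the max-pool, and the extra buffers returned by call are only saved for the backward pass. A hedged parity check might look like the sketch below; it assumes a CUDA device, uses illustrative tolerances, and the names eager, compiled, and x are made up.

import torch

# Hypothetical parity check between Descendant and its compiled wrapper.
eager = Descendant().cuda().eval()
compiled = DescendantNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # both expose conv1.{weight,bias}
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)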
lindagaw/Kadara
Descendant
false
10471
[ "MIT" ]
0
f1059b69a581344ca460c8df02ac3f73f3fbcba1
https://github.com/lindagaw/Kadara/tree/f1059b69a581344ca460c8df02ac3f73f3fbcba1
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xx/cxxsy7adcakcv5c6imnaqsvf3ebhgbph77vaoembzakbqdrjycbc.py # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, x], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # std => sqrt, var # sub => sub # truediv => div # x => add_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zd/czdzwoe4pjzwt5uqsqnvjev2fqbcqpnioydhfcnlbkgk527hzabw.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3o/c3oaxjlvvp4afd7ykuqeeshinhwcjxyjj3abqkwr3yqqinwm4e4f.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, 
eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2w/c2wy7nnmjg5t737ww7jovvpbthc2goznalidqgo6rnve5cme7k3g.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default_2, %div_tensor), kwargs = {}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = 
xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cc/ccc5cubvwrrtlmcdkkaw7lrpubmmsow2eocc44vbuqtnlv363xv3.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = 
xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/we/cwe54p4p4jvwbdktkpj3wy2coheu6f3r3dgvi7ozm7xjfk4mgbwx.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c3/cc3wjckmv52z5p6lagnrhsfwt53rzdfhvzlxkm5tgkwbs3kuzwax.py # Topologically Sorted Source Nodes: [res_1], Original ATen: [aten.add] # Source node to ATen node mapping: # res_1 => add_2 # Graph fragment: # %add_2 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %add_1), kwargs = {}) triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nr/cnrllyxkh5tmxzmndufu4v6arhz3nvwpyestxuypvazb6covpgb6.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/he/chevf4d6tadiz3y2a2abr2lj2bvo3wyfykoivwj2s4xedp3vdjuf.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] # Source node to ATen node mapping: # out_1 => add_5 # Graph fragment: # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, 
)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (16, 4), (4, 1)) assert_size_stride(primals_15, (16, ), (1, )) assert_size_stride(primals_16, (4, 16), (16, 1)) assert_size_stride(primals_17, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, x], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 64, grid=grid(64), stream=stream0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf2, primals_7, buf5, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf6, buf7, buf8, 256, grid=grid(256), stream=stream0) del buf6 buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(buf3, primals_9, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del primals_9 buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] 
triton_poi_fused_clone_5.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [res_1], Original ATen: [aten.add] triton_poi_fused_add_6.run(buf13, primals_11, buf0, 64, grid=grid(64), stream=stream0) del primals_11 buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_3, truediv_2, add_4], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_12, buf13, primals_13, buf14, 64, grid=grid(64), stream=stream0) del primals_13 buf15 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 16), (64, 16, 1), 0); del buf15 # reuse buf19 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_7.run(buf16, primals_15, buf19, 256, grid=grid(256), stream=stream0) del primals_15 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf16, (16, 16), (16, 1), 0), reinterpret_tensor(primals_16, (16, 4), (1, 16), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf18, buf13, primals_17, 64, grid=grid(64), stream=stream0) del primals_17 return (buf18, primals_1, primals_12, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(buf16, (16, 16), (16, 1), 0), primals_16, buf19, primals_14, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
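One detail worth noting in the generated code above: triton_poi_fused_add_div_mean_mul_std_sub_0 hard-codes the LayerNorm statistics for a last dimension of size 4. The mean divides by 4.0, the variance by 3.0 (Bessel's correction, matching the default unbiased estimator of torch.Tensor.std used in the eager LayerNorm below), and eps = 1e-06 is added after the square root rather than inside it. A tiny sketch verifying that arithmetic on one row (names are illustrative):

import torch

# Reproduce the fused kernel's per-row arithmetic for a row of length 4.
row = torch.tensor([1.0, 2.0, 3.0, 4.0])
mean = row.sum() / 4.0                        # tmp10 in the kernel
var = ((row - mean) ** 2).sum() / 3.0         # tmp25: sum of squares / 3.0
normed = (row - mean) / (var.sqrt() + 1e-06)  # eps added after sqrt
torch.testing.assert_close(
    normed, (row - row.mean()) / (row.std() + 1e-06))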
import math
import torch
from typing import Optional
import torch.nn.functional as F
from torch import nn


def attention(query, key, value, mask=None, dropout=None):
    """Compute 'Scaled Dot Product Attention'."""
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1000000000.0)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    out = torch.matmul(weights, value)
    return out, weights


class LayerNorm(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features: 'int', eps: 'float'=1e-06):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x: 'torch.Tensor'):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


class MultiHeadedAttention(nn.Module):

    def __init__(self, h, d_model, dropout=0.1):
        """Take in model size and number of heads."""
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        self.d_head = d_model // h
        self.h = h
        self.proj_q = nn.Linear(d_model, d_model)
        self.proj_k = nn.Linear(d_model, d_model)
        self.proj_v = nn.Linear(d_model, d_model)
        self.ret_proj = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
            'torch.Tensor', mask: 'Optional[torch.Tensor]'=None):
        """
        query: (batch_size, seq_len, d_model)
        key: (batch_size, seq_len, d_model)
        value: (batch_size, seq_len, d_model)
        mask: (batch_size, seq_len)
        """
        if mask is not None:
            mask = mask.unsqueeze(1)
        batch_size = query.size(0)
        seq_len = query.size(1)
        query = self.proj_q(query).view(batch_size, seq_len, self.h,
            self.d_head).transpose(1, 2)
        key = self.proj_k(key).view(batch_size, seq_len, self.h,
            self.d_head).transpose(1, 2)
        value = self.proj_v(value).view(batch_size, seq_len, self.h,
            self.d_head).transpose(1, 2)
        x, _ = attention(query, key, value, mask=mask, dropout=self.dropout)
        x = x.transpose(1, 2).contiguous().view(batch_size, seq_len,
            self.h * self.d_head)
        return self.ret_proj(x)


class PositionwiseFeedForward(nn.Module):
    """Implements FFN equation."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.w_2(self.dropout(F.relu(self.w_1(x))))


class Block(nn.Module):
    """A standard Decoder block for GPT."""

    def __init__(self, d_model: 'int', n_heads: 'int', dropout: 'float'=0.1):
        super(Block, self).__init__()
        self.d_model = d_model
        self.d_inner = 4 * self.d_model
        self.n_heads = n_heads
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(self.d_model)
        self.layer_norm2 = LayerNorm(self.d_model)
        self.multi_head_attn = MultiHeadedAttention(self.n_heads,
            self.d_model, self.dropout)
        self.feed_fwd = PositionwiseFeedForward(d_model, self.d_inner,
            self.dropout)

    def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None):
        x = self.layer_norm1(x)
        attn_out = self.multi_head_attn(x, x, x, mask)
        res_1 = attn_out + x
        feed_fwd_out = self.feed_fwd(self.layer_norm2(res_1))
        out = res_1 + feed_fwd_out
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'n_heads': 4}]
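A minimal usage sketch for the eager block above, mirroring get_inputs()/get_init_inputs(); the names block and x are illustrative, and .eval() makes the dropout layers no-ops so repeated calls agree.

import torch

# Hypothetical smoke test: a residual decoder block preserves its input shape.
block = Block(d_model=4, n_heads=4).eval()
x = torch.rand(4, 4, 4)  # (batch_size, seq_len, d_model)
with torch.no_grad():
    out = block(x)
assert out.shape == x.shape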
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from typing import Optional import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (16, 4), (4, 1)) assert_size_stride(primals_15, (16,), (1,)) assert_size_stride(primals_16, (4, 16), (16, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_2, primals_1, primals_3, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf1 triton_poi_fused_1[grid(16, 4)](buf2, primals_7, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_3[grid(256)](buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_4[grid(16, 4)](buf3, primals_9, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0) del buf12 triton_poi_fused_add_6[grid(64)](buf13, primals_11, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_12, buf13, primals_13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 buf15 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 16), (64, 16, 1), 0) del buf15 buf19 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(256)](buf16, primals_15, buf19, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (16, 16), (16, 1), 0), reinterpret_tensor(primals_16, (16, 4), (1, 16), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 triton_poi_fused_add_8[grid(64)](buf18, buf13, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_17 return (buf18, primals_1, primals_12, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf13, 
reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor( buf16, (16, 16), (16, 1), 0), primals_16, buf19, primals_14, primals_10, primals_8, primals_6, primals_4) def attention(query, key, value, mask=None, dropout=None): """Compute 'Scaled Dot Product Attention' """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) weights = F.softmax(scores, dim=-1) if dropout is not None: weights = dropout(weights) out = torch.matmul(weights, value) return out, weights class LayerNorm(nn.Module): """Construct a layernorm module (See citation for details).""" def __init__(self, features: 'int', eps: 'float'=1e-06): super(LayerNorm, self).__init__() self.a_2 = nn.Parameter(torch.ones(features)) self.b_2 = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x: 'torch.Tensor'): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, dropout=0.1): """Take in model size and number of heads.""" super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 self.d_head = d_model // h self.h = h self.proj_q = nn.Linear(d_model, d_model) self.proj_k = nn.Linear(d_model, d_model) self.proj_v = nn.Linear(d_model, d_model) self.ret_proj = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(p=dropout) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None): """ query: (batch_size, seq_len, dmodel) key: (batch_size, seq_len, dmodel) value: (batch_size, seq_len, dmodel) mask: (batch_size, seq_len) """ if mask is not None: mask = mask.unsqueeze(1) batch_size = query.size(0) seq_len = query.size(1) query = self.proj_q(query).view(batch_size, seq_len, self.h, self. d_head).transpose(1, 2) key = self.proj_k(key).view(batch_size, seq_len, self.h, self.d_head ).transpose(1, 2) value = self.proj_v(value).view(batch_size, seq_len, self.h, self. d_head).transpose(1, 2) x, _ = attention(query, key, value, mask=mask, dropout=self.dropout) x = x.transpose(1, 2).contiguous().view(batch_size, seq_len, self.h * self.d_head) return self.ret_proj(x) class PositionwiseFeedForward(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class BlockNew(nn.Module): """A standard Decoder block for GPT.""" def __init__(self, d_model: 'int', n_heads: 'int', dropout: 'float'=0.1): super(BlockNew, self).__init__() self.d_model = d_model self.d_inner = 4 * self.d_model self.n_heads = n_heads self.dropout = dropout self.layer_norm1 = LayerNorm(self.d_model) self.layer_norm2 = LayerNorm(self.d_model) self.multi_head_attn = MultiHeadedAttention(self.n_heads, self. 
d_model, self.dropout)
        self.feed_fwd = PositionwiseFeedForward(d_model, self.d_inner,
            self.dropout)

    def forward(self, input_0):
        primals_2 = self.layer_norm1.a_2
        primals_3 = self.layer_norm1.b_2
        primals_12 = self.layer_norm2.a_2
        primals_13 = self.layer_norm2.b_2
        primals_4 = self.multi_head_attn.proj_q.weight
        primals_5 = self.multi_head_attn.proj_q.bias
        primals_6 = self.multi_head_attn.proj_k.weight
        primals_7 = self.multi_head_attn.proj_k.bias
        primals_8 = self.multi_head_attn.proj_v.weight
        primals_9 = self.multi_head_attn.proj_v.bias
        primals_10 = self.multi_head_attn.ret_proj.weight
        primals_11 = self.multi_head_attn.ret_proj.bias
        primals_14 = self.feed_fwd.w_1.weight
        primals_15 = self.feed_fwd.w_1.bias
        primals_16 = self.feed_fwd.w_2.weight
        primals_17 = self.feed_fwd.w_2.bias
        primals_1 = input_0
        # Bias and LayerNorm primals are assigned to match their use in
        # call(): primals_5/7/9 are the q/k/v projection biases, primals_11
        # the output-projection bias, and primals_12/13 the second
        # LayerNorm's scale and shift.
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        return output[0]
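With the primals mapping above aligned to call() (projection biases in primals_5/7/9, output-projection bias in primals_11, second-LayerNorm affine parameters in primals_12/13), the compiled block can be checked against the eager one. A hedged sketch, assuming a CUDA device and illustrative tolerances; the compiled graph contains no dropout ops, so the eager side must run in eval mode for the comparison to be meaningful.

import torch

# Hypothetical parity check between Block and its compiled wrapper.
eager = Block(d_model=4, n_heads=4).cuda().eval()
compiled = BlockNew(d_model=4, n_heads=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical submodule layout
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)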
malhotraa/transformer-experiments
Block
false
10,472
[ "MIT" ]
0
82931b89b14d26dbd6e4ffef8d6f2fd8b7279c0f
https://github.com/malhotraa/transformer-experiments/tree/82931b89b14d26dbd6e4ffef8d6f2fd8b7279c0f
BinaryLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), 
xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ha/chaw4j4cijilv4rurwmzvs3nnol6fmwmio6yneprgmynztfqb3kg.py # Topologically Sorted Source Nodes: [pos_loss, sum_1, neg_loss, sum_2, add, loss], Original ATen: [aten.neg, aten.sum, aten.add, aten.div] # Source node to ATen node mapping: # add => add # loss => div # neg_loss => neg_1 # pos_loss => neg # sum_1 => sum_3 # sum_2 => sum_4 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_1,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %sum_4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 8), kwargs = {}) triton_per_fused_add_div_neg_sum_1 = async_compile.triton('triton_per_fused_add_div_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = 
rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp17 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp19 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp22 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp25 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp2 = tl_math.exp(tmp1) tmp3 = tl_math.exp(tmp0) tmp4 = tmp2 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp18 = tl_math.exp(tmp17) tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tl_math.log(tmp27) tmp29 = tmp17 - tmp28 tmp30 = -tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = tmp16 + tmp33 tmp35 = 0.125 tmp36 = tmp34 * tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp36, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax_1], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_0.run(arg1_1, buf2, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [pos_loss, sum_1, neg_loss, sum_2, add, loss], Original ATen: [aten.neg, aten.sum, aten.add, aten.div] triton_per_fused_add_div_neg_sum_1.run(buf4, buf0, buf2, 1, 64, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
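# triton_poi_fused__log_softmax_0 above performs the max-subtraction step of
# a numerically stable log-softmax before the exp/sum/log reduction. A
# minimal eager-mode sketch of the same identity (the helper and its input
# tensor are illustrative):
def sketch_stable_log_softmax():
    import torch
    x = torch.randn(4, 4)
    shifted = x - x.amax(dim=1, keepdim=True)  # subtract the row max
    stable = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    print(torch.allclose(stable, torch.log_softmax(x, dim=1)))  # True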
import torch
import torch.nn as nn
import torch.nn.functional as F


class BinaryLoss(nn.Module):

    def __init__(self):
        super(BinaryLoss, self).__init__()

    def forward(self, pos_score, neg_score):
        # Pass dim explicitly: the implicit-dim form of log_softmax is
        # deprecated, and dim=1 is the axis the compiled kernel normalizes.
        pos_loss = -F.log_softmax(pos_score, dim=1)[:, 1]
        neg_loss = -F.log_softmax(neg_score, dim=1)[:, 0]
        loss = (pos_loss.sum() + neg_loss.sum()) / (pos_loss.size(0) +
            neg_loss.size(0))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
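# Illustrative usage (the helper is hypothetical): BinaryLoss reads column 1
# of pos_score as the positive logit and column 0 of neg_score as the
# negative logit, then divides the summed NLL terms by the combined batch
# size -- 4 + 4 = 8 for the shapes returned by get_inputs, which is the
# 0.125 factor baked into the compiled kernel.
def example_binary_loss():
    criterion = BinaryLoss()
    pos, neg = get_inputs()
    print(criterion(pos, neg))  # a scalar tensor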
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused_add_div_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp17 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp19 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp22 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp25 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tl_math.exp(tmp1) tmp3 = tl_math.exp(tmp0) tmp4 = tmp2 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp18 = tl_math.exp(tmp17) tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tl_math.log(tmp27) tmp29 = tmp17 - tmp28 tmp30 = -tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = tmp16 + tmp33 tmp35 = 0.125 tmp36 = tmp34 * tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf4 = buf1 del buf1 triton_per_fused_add_div_neg_sum_1[grid(1)](buf4, buf0, 
buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class BinaryLossNew(nn.Module): def __init__(self): super(BinaryLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
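# A hypothetical parity sketch, assuming a CUDA device and that the eager
# BinaryLoss from the reference module above is in scope; not a definitive
# test, since float accumulation order can differ between the two paths.
def check_binary_loss_parity():
    import torch
    pos = torch.rand(4, 4, 4, 4, device='cuda')
    neg = torch.rand(4, 4, 4, 4, device='cuda')
    eager = BinaryLoss()(pos, neg)
    fused = BinaryLossNew()(pos, neg)
    print(torch.allclose(eager, fused, atol=1e-6))  # expected: True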
melster1010/VIAME
BinaryLoss
false
10,473
[ "BSD-3-Clause" ]
0
0062265088aae65effbfcd130bfb874c343c785f
https://github.com/melster1010/VIAME/tree/0062265088aae65effbfcd130bfb874c343c785f
Successor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yk/cykarwk3qy3pmem22bdbqxnd35nfbs5r3u5357r5uoderrncltwa.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 20 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/3k/c3k57g5yhtujuoxgs7jwv6kipspabfze46wndcbnl5hbd4s7xuz5.py # Topologically Sorted Source Nodes: [max_pool2d, conv_out], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # conv_out => relu # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 72000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x3 = (xindex // 30) x2 = (xindex // 18000) x4 = xindex % 18000 x5 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = 
triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + (18048*x2)), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7m/c7mejpb4ik5p2rmp2v4icum4owyptzcyjkwyolwpj6vuvfnck64b.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 135200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 676) % 50 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4l/c4l22elswhi7fd5thcrckd6rdgebe6e6s2sw2sfhhavnlajf5vkq.py # Topologically Sorted Source Nodes: [max_pool2d_1, conv_out_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv_out_1 => relu_1 # max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {}) # %le : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 33800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 13 x3 = (xindex // 13) x2 = (xindex // 8450) x4 = xindex % 8450 x5 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (26 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (27 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x4 + (8576*x2)), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp18, xmask) tl.store(out_ptr2 + (x4 + (8576*x2)), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1)) assert_size_stride(primals_5, (50, ), (1, )) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 288000, grid=grid(288000), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.int8) buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d, conv_out], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf1, buf2, buf3, 72000, grid=grid(72000), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 50, 26, 26), (33800, 676, 26, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 135200, grid=grid(135200), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 50, 13, 13), (8576, 169, 13, 1), torch.int8) buf7 = empty_strided_cuda((4, 50, 13, 13), (8450, 169, 13, 1), torch.float32) buf8 = empty_strided_cuda((4, 50, 13, 13), (8576, 169, 13, 1), torch.bool) # Topologically Sorted Source Nodes: [max_pool2d_1, conv_out_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3.run(buf5, buf6, buf7, buf8, 33800, grid=grid(33800), stream=stream0) return (buf7, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((50, 20, 5, 5), (500, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
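# The wrapper above runs both convolutions through extern_kernels.convolution
# with bias=None and defers each bias to a pointwise Triton kernel
# (triton_poi_fused_convolution_0/_2). The identity being exploited is that a
# conv bias is a per-channel broadcast add, as this illustrative sketch
# (hypothetical helper, arbitrary random weights) checks:
def sketch_bias_split():
    import torch
    import torch.nn.functional as F
    x = torch.rand(4, 1, 64, 64)
    w = torch.rand(20, 1, 5, 5)
    b = torch.rand(20)
    fused = F.conv2d(x, w, b)
    split = F.conv2d(x, w) + b.view(1, -1, 1, 1)
    print(torch.allclose(fused, split, atol=1e-5))  # True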
import torch
from torch import nn
import torch.nn.functional as F


class Successor(nn.Module):
    """Successor encoder model for ADDA."""

    def __init__(self):
        """Init the Successor encoder."""
        super(Successor, self).__init__()
        self.restored = False
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
        self.dropout2 = nn.Dropout2d()
        self.pool2 = nn.MaxPool2d(kernel_size=2)

    def forward(self, x):
        """Forward the Successor encoder."""
        # conv -> pool -> relu, twice, with Dropout2d between the stages.
        conv_out = F.relu(self.pool1(self.conv1(x)))
        conv_out = F.relu(self.pool2(self.dropout2(self.conv2(conv_out))))
        return conv_out


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
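# Illustrative shape walkthrough (hypothetical helper): each valid 5x5
# convolution trims 4 pixels per spatial dim and each 2x2 pool halves it,
# i.e. 64 -> conv1 -> 60 -> pool -> 30 -> conv2 -> 26 -> pool -> 13, which
# matches the buffer shapes asserted in the compiled call below.
def check_successor_shapes():
    net = Successor().eval()  # eval() makes Dropout2d a no-op
    x = torch.rand(4, 1, 64, 64)
    print(net(x).shape)  # torch.Size([4, 50, 13, 13])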
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 72000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x3 = xindex // 30 x2 = xindex // 18000 x4 = xindex % 18000 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + 18048 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 135200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 676 % 50 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 33800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 13 x3 = xindex // 13 x2 = xindex // 8450 x4 = xindex % 8450 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 
= tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x4 + 8576 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) tl.store(out_ptr2 + (x4 + 8576 * x2), tmp20, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1)) assert_size_stride(primals_5, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(288000)](buf1, primals_2, 288000, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.int8) buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(72000)](buf1, buf2, buf3, 72000, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 50, 26, 26), (33800, 676, 26, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(135200)](buf5, primals_5, 135200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 50, 13, 13), (8576, 169, 13, 1), torch.int8) buf7 = empty_strided_cuda((4, 50, 13, 13), (8450, 169, 13, 1), torch.float32) buf8 = empty_strided_cuda((4, 50, 13, 13), (8576, 169, 13, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid (33800)](buf5, buf6, buf7, buf8, 33800, XBLOCK=256, num_warps=4, num_stages=1) return (buf7, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8) class SuccessorNew(nn.Module): """Successor successorEncoder model for ADDA.""" def __init__(self): """Init Successor successorEncoder.""" super(SuccessorNew, self).__init__() self.restored = False self.conv1 = nn.Conv2d(1, 20, kernel_size=5) self.pool1 = nn.MaxPool2d(kernel_size=2) self.conv2 = nn.Conv2d(20, 50, kernel_size=5) self.dropout2 = nn.Dropout2d() self.pool2 = nn.MaxPool2d(kernel_size=2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
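# A hypothetical parity sketch, assuming a CUDA device and that the eager
# Successor from the reference module above is in scope. Dropout2d is
# stochastic in train mode and the compiled graph contains no dropout
# kernel, so the comparison is only meaningful with the eager model in eval().
def check_successor_parity():
    import torch
    eager = Successor().cuda().eval()
    fused = SuccessorNew().cuda()
    fused.load_state_dict(eager.state_dict())  # identical parameter names
    x = torch.rand(4, 1, 64, 64, device='cuda')
    print(torch.allclose(eager(x), fused(x), atol=1e-5))  # expected: True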
lindagaw/Kadara
Successor
false
10,474
[ "MIT" ]
0
f1059b69a581344ca460c8df02ac3f73f3fbcba1
https://github.com/lindagaw/Kadara/tree/f1059b69a581344ca460c8df02ac3f73f3fbcba1
MHSA
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4j/c4jcxsis7ek5ymkow2uiwddmhsmsyxijcvzvkbfjbavvo7na3asf.py # Topologically Sorted Source Nodes: [content_position], Original ATen: [aten.clone] # Source node to ATen node mapping: # content_position => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (x0 % 4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + ((x3 // 4)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/yz/cyzrujblrx7okcdxhnceg335n3kw4ymde6htxmd352eiillyccf2.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_10, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_10, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel): xnumel = 4 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.load(in_ptr1 + (r1 + (1024*x0)), None) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0)) tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = tmp7 / tmp10 tl.store(out_ptr2 + (r1 + (1024*x0)), tmp11, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/le/clegwyiweed5ksuxpqumnnpzduznhg57pkc32gtm37gqnyk2iaoq.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # out => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%view_15, [2, 2], [2, 2]), kwargs = {}) triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4, 1, 1, 4), (16, 4, 4, 4, 1)) assert_size_stride(primals_9, (1, 4, 1, 4, 1), (16, 4, 4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf3, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [content_position], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_8, primals_9, buf4, 256, grid=grid(256), stream=stream0) del primals_8 del primals_9 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: 
[content_position], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf6 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf6, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf7 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [content_content], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf6, (16, 1, 16), (16, 0, 1), 0), out=buf7) buf10 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_2.run(buf7, buf5, buf10, 4, 1024, grid=grid(4), stream=stream0) del buf5 del buf7 buf11 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf11, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf12 = empty_strided_cuda((16, 1, 16), (16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 16), (16, 0, 1), 0), reinterpret_tensor(buf10, (16, 16, 16), (256, 16, 1), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_3.run(buf12, buf13, 64, grid=grid(64), stream=stream0) return (buf13, primals_1, primals_2, primals_4, primals_6, buf10, reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf11, (16, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf6, (16, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4, 1, 1, 4), (16, 4, 4, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 4, 1, 4, 1), (16, 4, 4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
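# triton_poi_fused_clone_1 above materializes the broadcast sum of the two
# learned position embeddings. In eager terms, with the record's init values
# (head=4, dim=4, height=width=4), that is just r_h + r_w broadcasting over
# the missing spatial axis (hypothetical helper, random stand-in tensors):
def sketch_relative_position_broadcast():
    import torch
    head, dim, H, W = 4, 4, 4, 4
    r_h = torch.randn(1, head, dim // head, 1, H)
    r_w = torch.randn(1, head, dim // head, W, 1)
    r = (r_h + r_w).view(1, head, dim // head, -1)  # broadcast, then flatten
    print(r.shape)  # torch.Size([1, 4, 1, 16])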
import torch
from torch import nn


class MHSA(nn.Module):

    def __init__(self, height, width, dim, head):
        super(MHSA, self).__init__()
        self.head = head
        self.r_h = nn.Parameter(data=torch.randn(1, head, dim // head, 1, height), requires_grad=True)
        self.r_w = nn.Parameter(data=torch.randn(1, head, dim // head, width, 1), requires_grad=True)
        self.w_q = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=1, stride=1, bias=True)
        self.w_k = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=1, stride=1, bias=True)
        self.w_v = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=1, stride=1, bias=True)
        self.softmax = nn.Softmax(dim=-1)
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        batch, dim, height, width = x.size()
        q = self.w_q(x).view(batch, self.head, dim // self.head, -1).permute(0, 1, 3, 2)
        k = self.w_k(x).view(batch, self.head, dim // self.head, -1)
        v = self.w_v(x).view(batch, self.head, dim // self.head, -1)
        r = (self.r_h + self.r_w).view(1, self.head, dim // self.head, -1)
        content_position = torch.matmul(q, r)
        content_content = torch.matmul(q, k)
        energy = (content_content + content_position).view(batch, -1)
        attention = self.softmax(energy).view(batch, self.head, height * width, height * width)
        feature = torch.matmul(v, attention).view(batch, dim, height, width)
        out = self.pool(feature)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'height': 4, 'width': 4, 'dim': 4, 'head': 4}]
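The one non-obvious step in this forward pass is the relative position term: r_h has shape (1, head, dim // head, 1, height) and r_w has shape (1, head, dim // head, width, 1), so their sum broadcasts to one bias per spatial position before being flattened. A minimal sketch of just that broadcast (my own illustration, not part of the repo):

import torch

# Shapes match the module above with dim == head, so dim // head == 1.
head, d, H, W = 4, 1, 4, 4
r_h = torch.randn(1, head, d, 1, H)   # one bias per row
r_w = torch.randn(1, head, d, W, 1)   # one bias per column
r = (r_h + r_w).view(1, head, d, -1)  # broadcast to the full W x H grid, then flatten
print(r.shape)  # torch.Size([1, 4, 1, 16])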
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + x0 % 4), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3 // 4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_per_fused__softmax_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.load(in_ptr1 + (r1 + 1024 * x0), None) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0)) tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = tmp7 / tmp10 tl.store(out_ptr2 + (r1 + 1024 * x0), tmp11, None) @triton.jit def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4, 1, 1, 4), (16, 4, 4, 4, 1)) assert_size_stride(primals_9, (1, 4, 1, 4, 1), (16, 4, 4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf3, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32 ) triton_poi_fused_clone_1[grid(256)](primals_8, primals_9, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 del primals_9 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf6 = buf1 del buf1 triton_poi_fused_convolution_0[grid(256)](buf6, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf7 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf6, (16, 1, 16), (16, 0, 1), 0), out=buf7) buf10 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) triton_per_fused__softmax_2[grid(4)](buf7, buf5, buf10, 4, 1024, num_warps=8, num_stages=1) del buf5 del buf7 buf11 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf11, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf12 = empty_strided_cuda((16, 1, 16), (16, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 16), (16, 0, 1 ), 0), reinterpret_tensor(buf10, (16, 16, 16), (256, 16, 1), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_avg_pool2d_3[grid(64)](buf12, buf13, 64, 
XBLOCK=64, num_warps=1, num_stages=1) return (buf13, primals_1, primals_2, primals_4, primals_6, buf10, reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf11, (16, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf6, (16, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)) class MHSANew(nn.Module): def __init__(self, height, width, dim, head): super(MHSANew, self).__init__() self.head = head self.r_h = nn.Parameter(data=torch.randn(1, head, dim // head, 1, height), requires_grad=True) self.r_w = nn.Parameter(data=torch.randn(1, head, dim // head, width, 1), requires_grad=True) self.w_q = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size =1, stride=1, bias=True) self.w_k = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size =1, stride=1, bias=True) self.w_v = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size =1, stride=1, bias=True) self.softmax = nn.Softmax(dim=-1) self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) def forward(self, input_0): primals_8 = self.r_h primals_9 = self.r_w primals_2 = self.w_q.weight primals_3 = self.w_q.bias primals_4 = self.w_k.weight primals_5 = self.w_k.bias primals_6 = self.w_v.weight primals_7 = self.w_v.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
lzu-zhanghr/vision-transformer-zoo
MHSA
false
10,475
[ "MIT" ]
0
2cc6e3551c39816acc95ade040bbf9bd115a6b03
https://github.com/lzu-zhanghr/vision-transformer-zoo/tree/2cc6e3551c39816acc95ade040bbf9bd115a6b03
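A quick way to sanity-check the compiled wrapper against the eager module is to copy the eager parameters into MHSANew and compare forward outputs. A minimal sketch, assuming a CUDA device (the generated call hard-codes cuda:0 and the traced (4, 4, 4, 4) input shape):

import torch

eager = MHSA(height=4, width=4, dim=4, head=4).cuda()
fused = MHSANew(height=4, width=4, dim=4, head=4).cuda()
fused.load_state_dict(eager.state_dict())  # parameter names line up one-to-one

x = torch.rand(4, 4, 4, 4, device='cuda')
# Loose tolerances: the fused softmax reorders floating-point reductions.
torch.testing.assert_close(eager(x), fused(x), rtol=1e-4, atol=1e-4)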
IdentityPadding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/i5/ci5jtsingp2qnrodx6wlbyak3tjanrom5rpybyruspgrcwq6flhn.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class IdentityPadding(nn.Module):

    def __init__(self, in_channels, out_channels, stride):
        super(IdentityPadding, self).__init__()
        self.pooling = nn.MaxPool2d(1, stride=stride)
        self.add_channels = out_channels - in_channels

    def forward(self, x):
        out = F.pad(x, (0, 0, 0, 0, 0, self.add_channels))
        out = self.pooling(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'stride': 1}]
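Note the F.pad argument order: for an NCHW tensor the six-tuple pads (W-left, W-right, H-left, H-right, C-front, C-back), so (0, 0, 0, 0, 0, add_channels) appends zero channels at the end, and the stride-s max pool then downsamples spatially. A small illustration with hypothetical shapes (not the traced configuration):

import torch

pad = IdentityPadding(in_channels=4, out_channels=8, stride=2)
x = torch.rand(2, 4, 8, 8)
y = pad(x)
print(y.shape)                   # torch.Size([2, 8, 4, 4])
assert torch.all(y[:, 4:] == 0)  # the four appended channels stay zero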
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tl.store(out_ptr0 + x0, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class IdentityPaddingNew(nn.Module):

    def __init__(self, in_channels, out_channels, stride):
        super(IdentityPaddingNew, self).__init__()
        self.pooling = nn.MaxPool2d(1, stride=stride)
        self.add_channels = out_channels - in_channels

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
moerashidi/deep_ensemble
IdentityPadding
false
10,476
[ "MIT" ]
0
51cd890643b0f01849583e6585eef241776b0ef4
https://github.com/moerashidi/deep_ensemble/tree/51cd890643b0f01849583e6585eef241776b0ef4
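Worth flagging: this graph was traced with in_channels == out_channels and stride=1, so add_channels is 0, the pad is a no-op, and the generated kernel is a plain element-wise copy; call() also hard-asserts the (4, 4, 4, 4) input, so any other configuration would need a re-trace. Under exactly the traced setup the module behaves as an identity, which is easy to check (sketch assumes a CUDA device):

import torch

pad = IdentityPaddingNew(in_channels=4, out_channels=4, stride=1).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(pad(x), x)  # pure copy: no new channels, stride 1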
MutliClassNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/r3/cr3dj3iqo4mxp3jap6oqywrnhpoxhtxzi373qtxftjspbz3o3pb3.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex 
% 1000 x2 = xindex % 4000 x3 = (xindex // 4000) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (4096*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # x_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1000, 4), (4, 1)) assert_size_stride(primals_2, (1000, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1000), (1000, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 4, 4, 1000), 
(16384, 4096, 1000, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 64000, grid=grid(64000), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), reinterpret_tensor(primals_4, (1000, 4), (1, 1000), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), buf3, primals_4, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1000, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1000, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1000), (1000, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class MutliClassNN(nn.Module):

    def __init__(self, num_features, num_labels):
        super(MutliClassNN, self).__init__()
        self.fc1 = torch.nn.Linear(num_features, 1000)
        self.fc3 = torch.nn.Linear(1000, num_labels)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.sigmoid(self.fc3(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4, 'num_labels': 4}]
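nn.Linear only contracts the trailing dimension, which is why the compiled graph can flatten the [4, 4, 4, 4] input to a (64, 4) matrix for its GEMMs. A short equivalence sketch (my own check, using the module as defined above):

import torch

model = MutliClassNN(num_features=4, num_labels=4)
x = torch.rand(4, 4, 4, 4)
y = model(x)    # Linear treats every leading dim as batch
print(y.shape)  # torch.Size([4, 4, 4, 4])
assert torch.allclose(y, model(x.reshape(64, 4)).reshape(4, 4, 4, 4))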
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 1000 x2 = xindex % 4000 x3 = xindex // 4000 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 4096 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1000, 4), (4, 1)) assert_size_stride(primals_2, (1000,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1000), (1000, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64000)](buf1, primals_2, buf4, 64000, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0 ), reinterpret_tensor(primals_4, (1000, 4), (1, 1000), 0), out=buf2 ) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0 ), buf3, primals_4, buf4 class MutliClassNNNew(nn.Module): def __init__(self, num_features, num_labels): super(MutliClassNNNew, self).__init__() self.fc1 = torch.nn.Linear(num_features, 1000) self.fc3 = torch.nn.Linear(1000, num_labels) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc3.weight primals_5 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mhagenow01/ECE532ClassifierComparison
MutliClassNN
false
10,477
[ "MIT" ]
0
5066931d97aae2c25c8b9451fe3d12021f5748a1
https://github.com/mhagenow01/ECE532ClassifierComparison/tree/5066931d97aae2c25c8b9451fe3d12021f5748a1
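Since the head ends in a sigmoid, the natural training pairing is an element-wise binary cross-entropy over independent labels. A hedged usage sketch (the 0/1 targets here are synthetic, just to exercise backward):

import torch
import torch.nn.functional as F

model = MutliClassNN(num_features=4, num_labels=4)
x = torch.rand(16, 4)
target = torch.randint(0, 2, (16, 4)).float()  # multi-label 0/1 targets
probs = model(x)                               # sigmoid outputs in (0, 1)
loss = F.binary_cross_entropy(probs, target)
loss.backward()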
SymKlCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py # Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax] # Source node to ATen node mapping: # log_softmax_1 => amax_2, sub_4 # softmax => amax_1, exp_1, sub_2 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_2), kwargs = {}) triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/7m/c7m62lqyfgv73n2cb7qnzr2xodmurvv7yy6z2m7nhu5m3pfdjjxe.py # Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # softmax_1 => amax_3, exp_3, sub_6 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {}) triton_poi_fused__log_softmax__softmax_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, 
xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/t4/ct4y23pxxoeimylfghb3zgxs2sgexranitqqpowxqihbyqj4rh4q.py # Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean, aten.add] # Source node to ATen node mapping: # kl_div => eq, full_default, full_default_1, isnan, log_1, mean, mul, mul_1, sub_3, where, where_1 # kl_div_1 => eq_1, full_default_2, full_default_3, isnan_1, log_3, mean_1, mul_2, mul_3, sub_7, where_2, where_3 # log_softmax => exp, log, sub_1, sum_1 # log_softmax_1 => exp_2, log_2, sub_5, sum_3 # loss => add # loss_1 => mul_4 # softmax => div, sum_2 # softmax_1 => div_1, sum_4 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %mean : [num_users=1] = 
call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1], True), kwargs = {}) # %div_1 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {}) # %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_1,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_1, 0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %log_3), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1], True), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sub_5), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_7,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {}) triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp37 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp51 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp52 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp54 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp39 = tmp37 + tmp38 tmp41 = 
tmp39 + tmp40 tmp43 = tmp41 + tmp42 tmp44 = tmp36 / tmp43 tmp45 = libdevice.isnan(tmp44).to(tl.int1) tmp46 = tmp44 == tmp10 tmp47 = tl_math.log(tmp44) tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp10, tmp48) tmp50 = tl.where(tmp45, tmp15, tmp49) tmp53 = tl_math.exp(tmp52) tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp63 = tl_math.log(tmp62) tmp64 = tmp51 - tmp63 tmp65 = tmp44 * tmp64 tmp66 = tmp50 - tmp65 tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = _tmp68 + tmp67 _tmp68 = tl.where(rmask, tmp69, _tmp68) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp68 = tl.sum(_tmp68, 1)[:, None] tmp70 = 256.0 tmp71 = tmp34 / tmp70 tmp72 = tmp68 / tmp70 tmp73 = tmp71 + tmp72 tmp74 = 1.0 tmp75 = tmp73 * tmp74 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp75, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf6, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax] triton_poi_fused__log_softmax__softmax_1.run(arg0_1, buf2, buf4, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean, aten.add] triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2.run(buf8, buf0, buf2, buf4, buf6, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 del buf4 del buf6 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight """
        return


class SymKlCriterion(Criterion):

    def __init__(self, alpha=1.0, name='KL Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """input/target: logits """
        input = input.float()
        target = target.float()
        loss = F.kl_div(F.log_softmax(input, dim=-1), F.softmax(target.detach(), dim=-1)) + F.kl_div(F.log_softmax(target, dim=-1), F.softmax(input.detach(), dim=-1))
        loss = loss * self.alpha
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
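Two details are easy to miss in this forward: detach() stops gradients flowing through the soft targets on each side, and F.kl_div's default reduction='mean' averages over every element rather than per sample (PyTorch's docs recommend 'batchmean' for a true KL value). A restatement of what the loss evaluates to (my own check, not repo code):

import torch
import torch.nn.functional as F

a, b = torch.randn(8, 4), torch.randn(8, 4)
p, q = F.softmax(b, dim=-1), F.softmax(a, dim=-1)
# kl_div(log_q_like, p_like) with reduction='mean' is an element-wise
# average of p * (log p - log q); the loss sums both directions.
manual = (p * (p.log() - F.log_softmax(a, dim=-1))).mean() + \
         (q * (q.log() - F.log_softmax(b, dim=-1))).mean()
assert torch.allclose(SymKlCriterion()(a, b), manual, atol=1e-6)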
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2(in_out_ptr0 , in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl. 
constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp37 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp38 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp40 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp51 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp52 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp54 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp60 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp39 = tmp37 + tmp38 tmp41 = tmp39 + tmp40 tmp43 = tmp41 + tmp42 tmp44 = tmp36 / tmp43 tmp45 = libdevice.isnan(tmp44).to(tl.int1) tmp46 = tmp44 == tmp10 tmp47 = tl_math.log(tmp44) tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp10, tmp48) tmp50 = tl.where(tmp45, tmp15, tmp49) tmp53 = tl_math.exp(tmp52) tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp63 = tl_math.log(tmp62) tmp64 = tmp51 - tmp63 tmp65 = tmp44 * tmp64 tmp66 = tmp50 - tmp65 tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = _tmp68 + tmp67 _tmp68 = tl.where(rmask, tmp69, _tmp68) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp68 = tl.sum(_tmp68, 1)[:, None] tmp70 = 256.0 tmp71 = 
tmp34 / tmp70 tmp72 = tmp68 / tmp70 tmp73 = tmp71 + tmp72 tmp74 = 1.0 tmp75 = tmp73 * tmp74 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp75, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_1[grid(256)](arg0_1, buf2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf8 = buf3 del buf3 triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2[grid(1) ](buf8, buf0, buf2, buf4, buf6, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del buf0 del buf2 del buf4 del buf6 return buf8, class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class SymKlCriterionNew(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mahartmann/mt-dnn
SymKlCriterion
false
10,479
[ "MIT" ]
0
c9aa3379dc255fd8fc40f24b6cd508f6a645b32f
https://github.com/mahartmann/mt-dnn/tree/c9aa3379dc255fd8fc40f24b6cd508f6a645b32f
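# Sketch (not from the mt-dnn source): the reduction kernel above accumulates two
# xlogy-style sums -- KL(softmax(target) || softmax(input)) and its mirror image --
# divides each by the element count (the 256.0 constant) and adds them, scaled by
# alpha = 1.0. A minimal eager-mode equivalent, assuming SymKlCriterion follows the
# same log_softmax/softmax pattern as the KlCriterion record below:
import torch
import torch.nn.functional as F

def sym_kl(input, target, alpha=1.0):
    # each F.kl_div uses the default elementwise-'mean' reduction (sum / numel),
    # matching the division by 256.0 at the end of the fused reduction kernel
    loss = F.kl_div(F.log_softmax(input, dim=-1), F.softmax(target, dim=-1)) \
         + F.kl_div(F.log_softmax(target, dim=-1), F.softmax(input, dim=-1))
    return alpha * loss

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(sym_kl(x, y))  # scalar, comparable to buf8 returned by call()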
KlCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax_1, exp_1, sub_2 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/g5/cg5f2rptqnpi2mrqpqc4tujqpbrrrjrse6plhgftx425znsffpfv.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/i2/ci2yamwiz72oxetvudslk7ph4kozejsdj65prr6khbbmwns6aynp.py # Topologically Sorted Source Nodes: [softmax, loss, 
log_softmax, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # loss => eq, full_default, full_default_1, isnan, log_1, mean, mul, mul_1, sub_3, where, where_1 # loss_1 => mul_2 # softmax => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp36 = 256.0 tmp37 = tmp34 / tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], 
Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax, loss, log_softmax, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean] triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class KlCriterion(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """input/target: logits """ input = input.float() target = target.float() loss = F.kl_div(F.log_softmax(input, dim=-1), F.softmax(target, dim=-1) ) loss = loss * self.alpha return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn.modules.loss import _Loss from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. 
constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp36 = 256.0 tmp37 = tmp34 / tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_red_fused__log_softmax__softmax_mean_mul_sub_xlogy_2[grid(1)]( buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del buf0 del buf2 return buf4, class Criterion(_Loss): def __init__(self, alpha=1.0, name='criterion'): super().__init__() """Alpha is used to weight each loss term """ self.alpha = alpha self.name = name def forward(self, input, target, weight=None, ignore_index=-1): """weight: sample weight """ return class KlCriterionNew(Criterion): def __init__(self, alpha=1.0, name='KL Div Criterion'): super().__init__() self.alpha = alpha self.name = name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mahartmann/mt-dnn
KlCriterion
false
10,480
[ "MIT" ]
0
c9aa3379dc255fd8fc40f24b6cd508f6a645b32f
https://github.com/mahartmann/mt-dnn/tree/c9aa3379dc255fd8fc40f24b6cd508f6a645b32f
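# Sketch (not from the mt-dnn source): the two pointwise kernels above subtract the
# per-row max before exponentiating (the numerically stable softmax/log-softmax
# trick), and the reduction kernel expands F.kl_div into xlogy(p, p) - p * log_q;
# the eq/isnan branches (tmp9..tmp16) implement the xlogy convention 0 * log(0) = 0.
# A sanity check of that identity in plain PyTorch:
import torch
import torch.nn.functional as F

inp, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
p = F.softmax(tgt, dim=-1)          # buf0, normalized inside the reduction kernel
log_q = F.log_softmax(inp, dim=-1)  # buf2, log-sum-exp finished inside the reduction
manual = (torch.xlogy(p, p) - p * log_q).mean()    # the sum / 256.0 in the kernel
assert torch.allclose(manual, F.kl_div(log_q, p))  # default 'mean' reduction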
FlowHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3p/c3pccxgnuu4ndvejdrgnnzzkvckhydfsbcaf7lwd5g3lofpww4cc.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sc/cscsiwn4jzs35kmdkiqiai55z42bpakheiazewur3x5beq7teiv3.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 2 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_4, (2, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, 
(4, 256, 64, 64), (1048576, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 4194304, grid=grid(4194304), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 64, 64), (8192, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 32768, grid=grid(32768), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 128, 64, 64), (524288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FlowHead(nn.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = nn.ReLU(inplace=True) def forward(self, x): return self.conv2(self.relu(self.conv1(x))) def get_inputs(): return [torch.rand([4, 128, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 2 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_4, (2, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(4194304)](buf1, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 64, 64), (8192, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(32768)](buf3, primals_5, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class FlowHeadNew(nn.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHeadNew, self).__init__() self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
luyu94/RAFT
FlowHead
false
10,481
[ "BSD-3-Clause" ]
0
d0a37db031af49a5d0d9b524d214acc989becf5b
https://github.com/luyu94/RAFT/tree/d0a37db031af49a5d0d9b524d214acc989becf5b
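# Sketch (not from the RAFT source): inductor lowers each Conv2d in the record above
# to an extern convolution called with bias=None, then fuses the bias add into a
# following in-place pointwise kernel (together with the ReLU for conv1); the index
# expression xindex // 4096 % C recovers the channel for the bias broadcast.
# A minimal eager-mode equivalence check:
import torch
import torch.nn.functional as F

x = torch.rand(4, 128, 64, 64)
w, b = torch.rand(256, 128, 3, 3), torch.rand(256)
y = F.conv2d(x, w, bias=None, padding=1)  # extern_kernels.convolution(..., bias=None)
y = F.relu(y + b.view(1, -1, 1, 1))       # triton_poi_fused_convolution_relu_0
assert torch.allclose(y, F.relu(F.conv2d(x, w, b, padding=1)), atol=1e-5)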
DPLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/cm/ccmo7fnacrzz2bpb3kjhjwznydxc2ydumd6dycmnumtbvfyp23ld.py # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # c_t => add_1 # f_t => sigmoid_1 # g_t => tanh # h_t => mul_2 # i_t => sigmoid # mul => mul # mul_1 => mul_1 # o_t => sigmoid_2 # tanh_1 => tanh_1 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_2,), kwargs = {}) # %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_7), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = tl.sigmoid(tmp30) tmp33 = tmp31 * tmp32 tmp34 = tmp7 * tmp23 tmp35 = tmp33 + tmp34 tmp36 = 1.0 tmp37 = tmp36 - tmp31 tmp38 = tmp31 * tmp37 tmp39 = libdevice.tanh(tmp35) tmp40 = tmp15 * tmp39 tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp23, xmask) tl.store(out_ptr3 + (x2), tmp35, xmask) tl.store(out_ptr4 + (x2), tmp38, xmask) tl.store(out_ptr5 + (x2), tmp40, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() 
assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0.run(buf0, primals_2, buf1, primals_5, primals_7, buf2, buf4, buf3, buf5, buf7, buf6, 16, grid=grid(16), stream=stream0) del buf0 del buf1 del primals_2 del primals_5 return (buf6, buf5, primals_3, primals_6, primals_7, buf2, buf3, buf4, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from typing import Tuple class LSTMLinear(nn.Linear): """ This function is the same as an nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCell(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated applications of this class. """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from a uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]: gates = self.ih(x) + self.hh(h_prev) i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates, self.hidden_size, 1) i_t = torch.sigmoid(i_t_input) f_t = torch.sigmoid(f_t_input) g_t = torch.tanh(g_t_input) o_t = torch.sigmoid(o_t_input) c_t = f_t * c_prev + i_t * g_t h_t = o_t * torch.tanh(c_t) return h_t, c_t def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp16 + tmp17 tmp21 = tmp19 + tmp20 tmp22 = tmp18 + tmp21 tmp23 = libdevice.tanh(tmp22) tmp26 = tmp24 + tmp25 tmp29 = tmp27 + tmp28 tmp30 = tmp26 + tmp29 tmp31 = tl.sigmoid(tmp30) tmp33 = tmp31 * tmp32 tmp34 = tmp7 * tmp23 tmp35 = tmp33 + tmp34 tmp36 = 1.0 tmp37 = tmp36 - tmp31 tmp38 = tmp31 * tmp37 tmp39 = libdevice.tanh(tmp35) tmp40 = tmp15 * tmp39 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp23, xmask) tl.store(out_ptr3 + x2, tmp35, xmask) tl.store(out_ptr4 + x2, tmp38, xmask) tl.store(out_ptr5 + x2, tmp40, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 16), (1, 4), 
0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(16)](buf0 , primals_2, buf1, primals_5, primals_7, buf2, buf4, buf3, buf5, buf7, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 return (buf6, buf5, primals_3, primals_6, primals_7, buf2, buf3, buf4, buf5, buf7) class LSTMLinear(nn.Linear): """ This function is the same as an nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCellNew(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated applications of this class. """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from a uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, input_0, input_1, input_2): primals_1 = self.ih.weight primals_2 = self.ih.bias primals_4 = self.hh.weight primals_5 = self.hh.bias primals_3 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
madhavajay/opacus
DPLSTMCell
false
10,482
[ "Apache-2.0" ]
0
7ae098764b4cf2388c66e263dd8d56bca0a290d0
https://github.com/madhavajay/opacus/tree/7ae098764b4cf2388c66e263dd8d56bca0a290d0
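# Sketch (not from the opacus source): the fused cell kernel above reads the (4, 16)
# gate buffers in four hidden_size-wide slices at offsets 0, 4, 8, 12 -- the
# torch.split order i, f, g, o -- and additionally stores f_t * (1 - f_t) (buf7),
# the sigmoid_backward factor that autograd reuses. The cell update it fuses into a
# single launch, in eager form:
import torch

hidden_size = 4
gates = torch.rand(4, 4 * hidden_size)  # ih(x) + hh(h_prev), i.e. buf0 + buf1 + biases
c_prev = torch.rand(4, hidden_size)
i_t, f_t, g_t, o_t = torch.split(gates, hidden_size, dim=1)
i_t, f_t, g_t, o_t = i_t.sigmoid(), f_t.sigmoid(), g_t.tanh(), o_t.sigmoid()
c_t = f_t * c_prev + i_t * g_t           # buf5
h_t = o_t * torch.tanh(c_t)              # buf6
print(h_t.shape, c_t.shape)              # torch.Size([4, 4]) twice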
PositionWiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] # Source node to ATen node mapping: # add => add # erf => erf # mul => mul # mul_1 => mul_1 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """Implementation of the gelu activation function by Hugging Face""" return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionWiseFeedForward(nn.Module): """ FeedForward Neural Networks for each position """ def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.dim, cfg.dim_ff) self.fc2 = nn.Linear(cfg.dim_ff, cfg.dim) def forward(self, x): return self.fc2(gelu(self.fc1(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(dim=4, dim_ff=4)}]
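A minimal usage sketch (not part of the original record): it exercises the eager module above through the record's own get_inputs()/get_init_inputs() helpers, assuming _mock_config from _paritybench_helpers simply exposes its keyword arguments as attributes.

import torch

def _demo_ffn():
    # Build the module from the recorded init inputs: cfg with dim=4, dim_ff=4.
    init_args, init_kwargs = get_init_inputs()
    model = PositionWiseFeedForward(*init_args, **init_kwargs)
    x, = get_inputs()            # a single (4, 4, 4, 4) tensor
    out = model(x)               # fc1 -> gelu -> fc2, applied over the last dim
    assert out.shape == x.shape  # dim == dim_ff == 4, so the shape is preserved
    return out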
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 def gelu(x): """Implementation of the gelu activation function by Hugging Face""" return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionWiseFeedForwardNew(nn.Module): """ FeedForward Neural Networks for each position """ def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.dim, cfg.dim_ff) self.fc2 = nn.Linear(cfg.dim_ff, cfg.dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
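A hedged verification sketch (assumes a CUDA device plus the eager PositionWiseFeedForward and get_inputs/get_init_inputs from the python_code field above): the Inductor-compiled wrapper should match the eager forward numerically once both modules share the same weights.

import torch

def _check_ffn_equivalence():
    _, kwargs = get_init_inputs()
    eager = PositionWiseFeedForward(**kwargs).cuda()
    compiled = PositionWiseFeedForwardNew(**kwargs).cuda()
    compiled.load_state_dict(eager.state_dict())  # same fc1/fc2 parameters
    x = get_inputs()[0].cuda()
    # libdevice.erf vs torch.erf should differ only at float-rounding level
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)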
FengMingquan-sjtu/pytorchic-bert
PositionWiseFeedForward
false
10483
[ "Apache-2.0" ]
0
83d616fb9c7e1d5c3646f9b6267ca912e2616d65
https://github.com/FengMingquan-sjtu/pytorchic-bert/tree/83d616fb9c7e1d5c3646f9b6267ca912e2616d65
AttentionUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/4u/c4uzfozwsgh3le5rfytdcr7fatpzxslwn6zrlgczuib4kxua4g25.py # Topologically Sorted Source Nodes: [add, sumTanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # sumTanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %view_1), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = 
xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + (x3), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # 
Topologically Sorted Source Nodes: [add, sumTanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf2, buf1, primals_6, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 del primals_6 buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [vProj], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, buf2, buf6, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F from torch.nn import init class AttentionUnit(nn.Module): def __init__(self, sDim, xDim, attDim): super(AttentionUnit, self).__init__() self.sDim = sDim self.xDim = xDim self.attDim = attDim self.sEmbed = nn.Linear(sDim, attDim) self.xEmbed = nn.Linear(xDim, attDim) self.wEmbed = nn.Linear(attDim, 1) def init_weights(self): init.normal_(self.sEmbed.weight, std=0.01) init.constant_(self.sEmbed.bias, 0) init.normal_(self.xEmbed.weight, std=0.01) init.constant_(self.xEmbed.bias, 0) init.normal_(self.wEmbed.weight, std=0.01) init.constant_(self.wEmbed.bias, 0) def forward(self, x, sPrev): batch_size, T, _ = x.size() x = x.view(-1, self.xDim) xProj = self.xEmbed(x) xProj = xProj.view(batch_size, T, -1) sPrev = sPrev.squeeze(0) sProj = self.sEmbed(sPrev) sProj = torch.unsqueeze(sProj, 1) sProj = sProj.expand(batch_size, T, self.attDim) sumTanh = torch.tanh(sProj + xProj) sumTanh = sumTanh.view(-1, self.attDim) vProj = self.wEmbed(sumTanh) vProj = vProj.view(batch_size, T) alpha = F.softmax(vProj, dim=1) return alpha def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'sDim': 4, 'xDim': 4, 'attDim': 4}]
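A minimal sanity-check sketch (not part of the original record) for the additive-attention unit above: given x of shape (B, T, xDim) and sPrev of shape (B, sDim), it returns weights alpha of shape (B, T), and the softmax over dim=1 makes each row a distribution over the T timesteps.

import torch

def _demo_attention():
    unit = AttentionUnit(sDim=4, xDim=4, attDim=4)
    unit.init_weights()
    x, sPrev = get_inputs()       # shapes (4, 4, 4) and (4, 4)
    alpha = unit(x, sPrev)
    assert alpha.shape == (4, 4)
    assert torch.allclose(alpha.sum(dim=1), torch.ones(4))  # rows sum to 1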
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + x3, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(64)](buf2, buf1, primals_6, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 del primals_6 buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), primals_4, buf2, buf6, primals_7 class AttentionUnitNew(nn.Module): def __init__(self, sDim, xDim, attDim): super(AttentionUnitNew, self).__init__() self.sDim = sDim self.xDim = xDim self.attDim = attDim self.sEmbed = nn.Linear(sDim, attDim) self.xEmbed = nn.Linear(xDim, attDim) self.wEmbed = nn.Linear(attDim, 1) def init_weights(self): init.normal_(self.sEmbed.weight, std=0.01) init.constant_(self.sEmbed.bias, 0) init.normal_(self.xEmbed.weight, std=0.01) init.constant_(self.xEmbed.bias, 0) init.normal_(self.wEmbed.weight, std=0.01) init.constant_(self.wEmbed.bias, 0) def forward(self, input_0, input_1): primals_2 = self.sEmbed.weight primals_3 = self.sEmbed.bias primals_4 = self.xEmbed.weight primals_6 = self.xEmbed.bias primals_7 = self.wEmbed.weight primals_8 = self.wEmbed.bias primals_1 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
lohzhunyewcs/aster.pytorch
AttentionUnit
false
10484
[ "MIT" ]
0
9441d386135a73b1baa3ec8c505f5eba99c26905
https://github.com/lohzhunyewcs/aster.pytorch/tree/9441d386135a73b1baa3ec8c505f5eba99c26905
FeatureResizer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/66/c666qvr725wwogti7syalhhjsndtv2n5sxzt6zi4wlesyjxocpic.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_1 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, 256, grid=grid(256), stream=stream0) del buf1 del buf2 del primals_5 return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn import torch.optim import torch.utils from torch import nn import torch.distributed class FeatureResizer(nn.Module): """ This class takes as input a set of embeddings of dimension C1 and outputs a set of embedding of dimension C2, after a linear transformation, dropout and normalization (LN). """ def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): super().__init__() self.do_ln = do_ln self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) self.dropout = nn.Dropout(dropout) def forward(self, encoder_features): x = self.fc(encoder_features) if self.do_ln: x = self.layer_norm(x) output = self.dropout(x) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_feat_size': 4, 'output_feat_size': 4, 'dropout': 0.5}]
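A minimal usage sketch (not part of the original record): FeatureResizer is Linear -> LayerNorm -> Dropout, so with input_feat_size == output_feat_size the shape is preserved; eval() disables dropout, and with LayerNorm's default affine init (weight=1, bias=0) each output vector is normalized to roughly zero mean.

import torch

def _demo_resizer():
    resizer = FeatureResizer(input_feat_size=4, output_feat_size=4,
                             dropout=0.5).eval()
    x = get_inputs()[0]
    out = resizer(x)
    assert out.shape == x.shape
    zero = torch.zeros_like(out.mean(-1))
    # LayerNorm(eps=1e-12) centers the last dimension at ~0 before the affine map
    assert torch.allclose(out.mean(-1), zero, atol=1e-5)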
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn import torch.optim import torch.utils from torch import nn import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](buf0, buf1, buf2, primals_4, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del primals_5 return buf3, primals_4, reinterpret_tensor(primals_3, 
(64, 4), (4, 1), 0 ), buf0 class FeatureResizerNew(nn.Module): """ This class takes as input a set of embeddings of dimension C1 and outputs a set of embedding of dimension C2, after a linear transformation, dropout and normalization (LN). """ def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): super().__init__() self.do_ln = do_ln self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = self.layer_norm.weight primals_5 = self.layer_norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mmaaz60/mdetr
FeatureResizer
false
10,485
[ "Apache-2.0" ]
0
fe1394c67e76a6c7e521bbda77d8294714038a3a
https://github.com/mmaaz60/mdetr/tree/fe1394c67e76a6c7e521bbda77d8294714038a3a
PairwiseRankerModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/b6/cb6j3uzxk3hlq74h24e2ofv66auocp2fsayzgs46c5z7xwnji5sg.py # Topologically Sorted Source Nodes: [query_doc_1_rep, query_doc_2_rep], Original ATen: [aten.cat] # Source node to ATen node mapping: # query_doc_1_rep => cat # query_doc_2_rep => cat_1 # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_5], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + (x2), tmp10, xmask) tl.store(out_ptr1 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2v/c2vcxjglklzwx6o2kqa6tmbd6f33y5rn3si52kju3aeqb5iwawxx.py # Topologically Sorted Source Nodes: [compare], Original ATen: [aten.cat] # Source node to ATen node mapping: # compare => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sigmoid, %sigmoid_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 8, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/b7/cb7iq44xucvx4o4uio3etz5hrrkllxx5igr3vjyglpwcku6mi232.py # Topologically Sorted Source Nodes: [sigmoid_2], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid_2 => sigmoid_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 8), (8, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [query_doc_1_rep, query_doc_2_rep], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_5, buf0, buf2, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 del primals_5 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 
0), alpha=1, beta=1, out=buf1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_3 del primals_4 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [compare], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf3, buf4, 32, grid=grid(32), stream=stream0) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (8, 1), (1, 8), 0), out=buf5) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [sigmoid_2], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf6, primals_7, 4, grid=grid(4), stream=stream0) del primals_7 return (buf6, buf0, buf1, buf2, buf3, buf4, buf6, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.onnx import torch.nn as nn class PairwiseRankerModel(nn.Module): def __init__(self, embedding_size): super(PairwiseRankerModel, self).__init__() self.query_doc_transform = torch.nn.Linear(in_features= embedding_size * 2, out_features=embedding_size) self.compare_transform = torch.nn.Linear(in_features=embedding_size * 2, out_features=1) def forward(self, query_embedding, doc_1_embedding, doc_2_embedding): query_doc_1_rep = torch.cat((query_embedding, doc_1_embedding), 1) query_doc_1_rep = torch.sigmoid(self.query_doc_transform( query_doc_1_rep)) query_doc_2_rep = torch.cat((query_embedding, doc_2_embedding), 1) query_doc_2_rep = torch.sigmoid(self.query_doc_transform( query_doc_2_rep)) compare = torch.cat((query_doc_1_rep, query_doc_2_rep), 1) compare = self.compare_transform(compare) return torch.sigmoid(compare) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embedding_size': 4}]
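A minimal usage sketch (not part of the original record): the ranker scores a (query, doc_1, doc_2) triple with one sigmoid output per row, so values lie strictly in (0, 1) and, once the model is trained, can be read as the probability that doc_1 should rank above doc_2.

import torch

def _demo_ranker():
    model = PairwiseRankerModel(embedding_size=4)
    q, d1, d2 = get_inputs()      # three (4, 4) embedding batches
    score = model(q, d1, d2)
    assert score.shape == (4, 1)
    assert bool(((score > 0) & (score < 1)).all())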
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr1 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 8), (8, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, primals_5, buf0, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_5 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_3 del primals_4 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](buf1, buf3, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (8, 1), (1, 8 ), 0), out=buf5) buf6 = buf5 del buf5 triton_poi_fused_sigmoid_2[grid(4)](buf6, primals_7, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_7 return buf6, buf0, buf1, buf2, buf3, buf4, buf6, primals_6 class PairwiseRankerModelNew(nn.Module): def __init__(self, embedding_size): super(PairwiseRankerModelNew, self).__init__() self.query_doc_transform = torch.nn.Linear(in_features= embedding_size * 2, out_features=embedding_size) self.compare_transform = torch.nn.Linear(in_features=embedding_size * 2, out_features=1) def forward(self, input_0, input_1, input_2): primals_3 = self.query_doc_transform.weight primals_4 = self.query_doc_transform.bias primals_6 = self.compare_transform.weight primals_7 = self.compare_transform.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mikhail-tsir/vespa-exloration
PairwiseRankerModel
false
10486
[ "Apache-2.0" ]
0
9bebc00acb43021fa60c6e144fe4f1fa1d7719fc
https://github.com/mikhail-tsir/vespa-exloration/tree/9bebc00acb43021fa60c6e144fe4f1fa1d7719fc
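A minimal eager-mode sketch of what the fused graph above computes, reconstructed from triton_poi_fused_cat_0, triton_poi_fused_cat_1 and triton_poi_fused_sigmoid_2 rather than taken from the source repository: each (query, doc) pair is concatenated, passed through the shared query_doc_transform plus a sigmoid, and the two transformed vectors are concatenated and scored by compare_transform plus a sigmoid. The helper name eager_pairwise_rank is an illustrative assumption, and the check assumes PairwiseRankerModelNew from the record above is in scope on a CUDA device.

import torch


def eager_pairwise_rank(model, query, doc1, doc2):
    # Mirrors buf0/buf2 (the two cats), buf1/buf3 (the addmm calls),
    # buf4 (sigmoid + cat) and buf6 (final addmm + sigmoid) in call() above.
    h1 = torch.sigmoid(model.query_doc_transform(torch.cat([query, doc1], dim=-1)))
    h2 = torch.sigmoid(model.query_doc_transform(torch.cat([query, doc2], dim=-1)))
    return torch.sigmoid(model.compare_transform(torch.cat([h1, h2], dim=-1)))


if __name__ == "__main__":
    torch.manual_seed(0)
    model = PairwiseRankerModelNew(embedding_size=4).cuda()
    q, d1, d2 = (torch.rand(4, 4, device="cuda") for _ in range(3))
    assert torch.allclose(model(q, d1, d2), eager_pairwise_rank(model, q, d1, d2), atol=1e-6)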
DNN_Classifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5y/c5yq7wkgmmcygrawripwacy566sggsmh2mzk5izw35wk7ferohhu.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 
100 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => div, sum_1 # 
Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 100), (100, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 6400, 
grid=grid(6400), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class DNN_Classifier(torch.nn.Module): def __init__(self, input_dim, nb_categories, hidden_dim=100): super(DNN_Classifier, self).__init__() self.fc_1 = nn.Linear(input_dim, hidden_dim) self.fc_2 = nn.Linear(hidden_dim, nb_categories) self.softmax = nn.Softmax(dim=1) def forward(self, x_in): x = torch.relu(self.fc_1(x_in)) x = self.softmax(self.fc_2(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'nb_categories': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 100), (100, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1, primals_2, buf5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0 ), buf4, primals_4, buf5 class DNN_ClassifierNew(torch.nn.Module): def __init__(self, input_dim, nb_categories, hidden_dim=100): super(DNN_ClassifierNew, self).__init__() self.fc_1 = nn.Linear(input_dim, hidden_dim) self.fc_2 = nn.Linear(hidden_dim, nb_categories) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.fc_1.weight primals_2 = self.fc_1.bias primals_4 = self.fc_2.weight primals_5 = self.fc_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mleila/AGNews_Document_Classifcation
DNN_Classifier
false
10487
[ "MIT" ]
0
1ff44edf1fcaaee582b79141a419d61df62da56e
https://github.com/mleila/AGNews_Document_Classifcation/tree/1ff44edf1fcaaee582b79141a419d61df62da56e
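A short sketch, not part of the record, of the two-pass numerically stable softmax that triton_poi_fused__softmax_1 and triton_poi_fused__softmax_2 implement over dim=1 of the (4, 4, 4, 4) activations; the stride-16 loads at offsets 0, 16, 32, 48 walk exactly that dimension. Runs on CPU.

import torch

x = torch.rand(4, 4, 4, 4)
# Pass 1 (triton_poi_fused__softmax_1): subtract the per-position max
# along dim=1, then exponentiate.
exp = (x - x.amax(dim=1, keepdim=True)).exp()
# Pass 2 (triton_poi_fused__softmax_2): normalize by the sum of exponentials.
out = exp / exp.sum(dim=1, keepdim=True)
assert torch.allclose(out, torch.softmax(x, dim=1))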
PositionEmbs
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/v4/cv47lmemhm4jgxapntmy6avnx2r4yydlpdjkih5r27hnltgiweqs.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.add] # Source node to ATen node mapping: # out => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 5, 4), (20, 4, 1)) assert_size_stride(primals_2, (4, 4, 5, 4), (80, 20, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 320, grid=grid(320), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 5, 4), (20, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 5, 4), (80, 20, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PositionEmbs(nn.Module): def __init__(self, num_patches, emb_dim, dropout_rate=0.1): super(PositionEmbs, self).__init__() self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, emb_dim)) if dropout_rate > 0: self.dropout = nn.Dropout(dropout_rate) else: self.dropout = None def forward(self, x): out = x + self.pos_embedding if self.dropout: out = self.dropout(out) return out def get_inputs(): return [torch.rand([4, 4, 5, 4])] def get_init_inputs(): return [[], {'num_patches': 4, 'emb_dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 5, 4), (20, 4, 1)) assert_size_stride(primals_2, (4, 4, 5, 4), (80, 20, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(320)](primals_2, primals_1, buf0, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class PositionEmbsNew(nn.Module): def __init__(self, num_patches, emb_dim, dropout_rate=0.1): super(PositionEmbsNew, self).__init__() self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, emb_dim)) if dropout_rate > 0: self.dropout = nn.Dropout(dropout_rate) else: self.dropout = None def forward(self, input_0): primals_1 = self.pos_embedding primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
longxianlei/UtilsTools
PositionEmbs
false
10488
[ "MIT" ]
0
f45c648eb679ed59bb512b61a1af52938e326ac3
https://github.com/longxianlei/UtilsTools/tree/f45c648eb679ed59bb512b61a1af52938e326ac3
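A minimal usage sketch, assuming a CUDA device and that PositionEmbsNew from the record above is in scope: the single fused kernel is a broadcast add, with the xindex % 20 indexing replaying the (1, 5, 4) positional embedding across every (5, 4) slice of the (4, 4, 5, 4) input. Note that the compiled forward never reaches the module's dropout, so the comparison holds in any mode.

import torch

mod = PositionEmbsNew(num_patches=4, emb_dim=4).cuda()
x = torch.rand(4, 4, 5, 4, device="cuda")
# The compiled path (triton_poi_fused_add_0) should agree with plain
# broadcasting of the positional embedding over the leading dims.
assert torch.allclose(mod(x), x + mod.pos_embedding)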
FPNHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/w3/cw3zrmvca7grv74jw3rs72gt6ae2mq5prncci372h3zwksqmyouw.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) return (buf3, primals_1, primals_2, primals_3, buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data class FPNHead(nn.Module): def __init__(self, num_in, num_mid, num_out): super().__init__() self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) def forward(self, x): x = nn.functional.relu(self.block0(x), inplace=True) x = nn.functional.relu(self.block1(x), inplace=True) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in': 4, 'num_mid': 4, 'num_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=256, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, primals_3, buf1, buf4 class FPNHeadNew(nn.Module): def __init__(self, num_in, num_mid, num_out): super().__init__() self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) def forward(self, input_0): primals_1 = self.block0.weight primals_3 = self.block1.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lePossum/DeblurGANv2
FPNHead
false
10489
[ "BSD-2-Clause" ]
0
b02c86de98f98604e2416a3a6121110ede7a2de9
https://github.com/lePossum/DeblurGANv2/tree/b02c86de98f98604e2416a3a6121110ede7a2de9
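A minimal sketch, assuming a CUDA device and FPNHeadNew from the record above in scope: the compiled forward is just conv -> relu -> conv -> relu, and the boolean buf4 returned by call() is only the relu mask kept for the backward pass, not part of the module output.

import torch
import torch.nn.functional as F

head = FPNHeadNew(num_in=4, num_mid=4, num_out=4).cuda()
x = torch.rand(4, 4, 4, 4, device="cuda")
# Eager reference for the two bias-free 3x3 convolutions with in-place ReLUs.
eager = F.relu(head.block1(F.relu(head.block0(x))))
assert torch.allclose(head(x), eager, atol=1e-6)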
ClipLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/l4/cl4tubtxyw5ee5xzznmwlohggffmq2we62eygecmxpgmibijhpes.py # Topologically Sorted Source Nodes: [norms, data], Original ATen: [aten.linalg_vector_norm, aten.mul] # Source node to ATen node mapping: # data => mul_1 # norms => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %view_1), kwargs = {}) # %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %mul_1), kwargs = {}) triton_per_fused_linalg_vector_norm_mul_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mul_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_mul_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.full([1, 1], 1, tl.int32) tmp8 = tmp7 / tmp6 tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = 1.0 tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = tmp0 * tmp12 tl.store(out_ptr2 + (r1 + (64*x0)), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [norms, data], Original ATen: [aten.linalg_vector_norm, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_mul_0.run(arg0_1, arg0_1, 4, 64, grid=grid(4), stream=stream0) return (arg0_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def clip_data(data, max_norm): norms = torch.norm(data.reshape(data.shape[0], -1), dim=-1) scale = (max_norm / norms).clamp(max=1.0) data *= scale.reshape(-1, 1, 1, 1) return data class ClipLayer(nn.Module): def __init__(self, max_norm): super(ClipLayer, self).__init__() self.max_norm = max_norm def forward(self, x): return clip_data(x, self.max_norm) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'max_norm': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_per_fused_linalg_vector_norm_mul_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.full([1, 1], 1, tl.int32) tmp8 = tmp7 / tmp6 tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = 1.0 tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = tmp0 * tmp12 tl.store(out_ptr2 + (r1 + 64 * x0), tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_per_fused_linalg_vector_norm_mul_0[grid(4)](arg0_1, arg0_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) return arg0_1, def clip_data(data, max_norm): norms = torch.norm(data.reshape(data.shape[0], -1), dim=-1) scale = (max_norm / norms).clamp(max=1.0) data *= scale.reshape(-1, 1, 1, 1) return data class ClipLayerNew(nn.Module): def __init__(self, max_norm): super(ClipLayerNew, self).__init__() self.max_norm = max_norm def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
lxuechen/Handcrafted-DP
ClipLayer
false
10490
[ "MIT" ]
0
64ca4759238027e307d8e88215a0a86fc8f3b395
https://github.com/lxuechen/Handcrafted-DP/tree/64ca4759238027e307d8e88215a0a86fc8f3b395
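A minimal sketch, assuming a CUDA device and ClipLayerNew from the record above in scope: the persistent-reduction kernel computes each sample's flattened L2 norm, scales by min(max_norm / norm, 1), and writes the result back into its own input buffer, so the module mutates and returns the very tensor it was given.

import torch

layer = ClipLayerNew(max_norm=4).cuda()
x = 10 * torch.rand(4, 4, 4, 4, device="cuda")  # norms likely above max_norm
out = layer(x)
# Every per-sample flattened norm is clipped to at most max_norm.
assert (out.reshape(4, -1).norm(dim=-1) <= 4 + 1e-5).all()
# in_ptr0 and out_ptr2 both alias arg0_1, so the input is updated in place.
assert out.data_ptr() == x.data_ptr()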
PolynomialEnvelope
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/hz/chzlrhhvdiw3cqqyi2kylodpwhcjuju7rbif4i23erkdoy4vsy3m.py # Topologically Sorted Source Nodes: [lt, pow_1, mul, add, pow_2, mul_1, add_1, pow_3, mul_2, env_val, zeros_like, where], Original ATen: [aten.lt, aten.pow, aten.mul, aten.add, aten.zeros_like, aten.where] # Source node to ATen node mapping: # add => add # add_1 => add_1 # env_val => add_2 # lt => lt # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # where => where # zeros_like => full_default # Graph fragment: # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%arg0_1, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -15.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 24), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 6), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_3, -10.0), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %add_2, %full_default), kwargs = {}) triton_poi_fused_add_lt_mul_pow_where_zeros_like_0 = async_compile.triton('triton_poi_fused_add_lt_mul_pow_where_zeros_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_lt_mul_pow_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_lt_mul_pow_where_zeros_like_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp3 tmp5 = -15.0 tmp6 = tmp4 * tmp5 tmp7 = tmp6 + tmp1 tmp8 = tmp4 * tmp0 tmp9 = 24.0 tmp10 = tmp8 * tmp9 tmp11 = tmp7 + tmp10 tmp12 = tmp3 * tmp0 tmp13 = tmp12 * tmp12 tmp14 = -10.0 tmp15 = tmp13 * tmp14 tmp16 = tmp11 + tmp15 tmp17 = 0.0 tmp18 = tl.where(tmp2, tmp16, tmp17) tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lt, pow_1, mul, add, pow_2, mul_1, add_1, pow_3, mul_2, env_val, zeros_like, where], Original ATen: [aten.lt, aten.pow, aten.mul, aten.add, aten.zeros_like, aten.where] stream0 = get_raw_stream(0) triton_poi_fused_add_lt_mul_pow_where_zeros_like_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class PolynomialEnvelope(torch.nn.Module): """ Polynomial envelope function that ensures a smooth cutoff. Parameters ---------- exponent: int Exponent of the envelope function. """ def __init__(self, exponent): super().__init__() assert exponent > 0 self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, d_scaled): env_val = 1 + self.a * d_scaled ** self.p + self.b * d_scaled ** (self .p + 1) + self.c * d_scaled ** (self.p + 2) return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'exponent': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_lt_mul_pow_where_zeros_like_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp3 tmp5 = -15.0 tmp6 = tmp4 * tmp5 tmp7 = tmp6 + tmp1 tmp8 = tmp4 * tmp0 tmp9 = 24.0 tmp10 = tmp8 * tmp9 tmp11 = tmp7 + tmp10 tmp12 = tmp3 * tmp0 tmp13 = tmp12 * tmp12 tmp14 = -10.0 tmp15 = tmp13 * tmp14 tmp16 = tmp11 + tmp15 tmp17 = 0.0 tmp18 = tl.where(tmp2, tmp16, tmp17) tl.store(out_ptr0 + x0, tmp18, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_lt_mul_pow_where_zeros_like_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PolynomialEnvelopeNew(torch.nn.Module): """ Polynomial envelope function that ensures a smooth cutoff. Parameters ---------- exponent: int Exponent of the envelope function. """ def __init__(self, exponent): super().__init__() assert exponent > 0 self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
krylea/ocp
PolynomialEnvelope
false
10491
[ "MIT" ]
0
00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
https://github.com/krylea/ocp/tree/00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
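A quick arithmetic check in plain Python, no GPU needed: the literals hard-coded in the fused kernel above are exactly the envelope coefficients that PolynomialEnvelope.__init__ computes for exponent p = 4.

p = 4
a = -(p + 1) * (p + 2) / 2  # -15.0, the tmp5 literal in the kernel
b = p * (p + 2)             # 24,    the tmp9 literal
c = -p * (p + 1) / 2        # -10.0, the tmp14 literal
assert (a, b, c) == (-15.0, 24, -10.0)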
ScaledSiLU
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/tb/ctba7iaosb7jgdxzd2uonnb4zpebevarpcjx6lt3orlpglpmb64v.py
# Topologically Sorted Source Nodes: [silu, mul], Original ATen: [aten.silu, aten.mul]
# Source node to ATen node mapping:
#   mul => mul_1
#   silu => mul, sigmoid
# Graph fragment:
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1.6666666666666667), kwargs = {})
triton_poi_fused_mul_silu_0 = async_compile.triton('triton_poi_fused_mul_silu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_silu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tmp3 = 1.6666666666666667
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [silu, mul], Original ATen: [aten.silu, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_silu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch


class ScaledSiLU(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.scale_factor = 1 / 0.6
        self._activation = torch.nn.SiLU()

    def forward(self, x):
        return self._activation(x) * self.scale_factor


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tmp3 = 1.6666666666666667
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_silu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ScaledSiLUNew(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.scale_factor = 1 / 0.6
        self._activation = torch.nn.SiLU()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
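A minimal sanity-check sketch, not part of the record itself: it assumes a CUDA device and that both classes above are importable in one session. The kernel's literal 1.6666666666666667 is exactly Python's value of 1 / 0.6, so the fused kernel should agree with the eager module.

# Sketch only (assumes CUDA is available and ScaledSiLU/ScaledSiLUNew
# from the two fields above are in scope): the fused kernel computes
# silu(x) * 1.6666666666666667, i.e. silu(x) * (1 / 0.6).
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = ScaledSiLU().cuda()
fused = ScaledSiLUNew().cuda()
assert torch.allclose(eager(x), fused(x))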
krylea/ocp
ScaledSiLU
false
10,492
[ "MIT" ]
0
00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
https://github.com/krylea/ocp/tree/00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
SiQU
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/e3/ce3flp2f3766ehq6qmatay7ybft362x5j23kxeaedseuzqtovc4g.py
# Topologically Sorted Source Nodes: [silu, mul], Original ATen: [aten.silu, aten.mul]
# Source node to ATen node mapping:
#   mul => mul_1
#   silu => mul, sigmoid
# Graph fragment:
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %mul), kwargs = {})
triton_poi_fused_mul_silu_0 = async_compile.triton('triton_poi_fused_mul_silu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_silu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [silu, mul], Original ATen: [aten.silu, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_silu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch


class SiQU(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self._activation = torch.nn.SiLU()

    def forward(self, x):
        return x * self._activation(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_silu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SiQUNew(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self._activation = torch.nn.SiLU()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
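A short equivalence sketch, assuming a CUDA device and the SiQU/SiQUNew classes above in scope: SiQU computes x * silu(x), which equals x^2 * sigmoid(x), and that is exactly the kernel's chain tmp0 * (tmp0 * sigmoid(tmp0)).

# Sketch only (CUDA assumed): verify the fused kernel and the eager
# module both match the closed form x^2 * sigmoid(x).
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = x * x * torch.sigmoid(x)
assert torch.allclose(SiQU().cuda()(x), ref)
assert torch.allclose(SiQUNew()(x.clone()), ref)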
krylea/ocp
SiQU
false
10,493
[ "MIT" ]
0
00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
https://github.com/krylea/ocp/tree/00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
DPSLTMAdapter
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_8/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py
# Topologically Sorted Source Nodes: [h_0s], Original ATen: [aten.zeros]
# Source node to ATen node mapping:
#   h_0s => full_default
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/sw/cswlmqxq57h53l6axfd6psb5uckvpd32zawfihpwp3s4owvqpcsb.py
# Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward]
# Source node to ATen node mapping:
#   c_t => add_1
#   f_t => sigmoid_1
#   g_t => tanh
#   h_t => mul_2
#   i_t => sigmoid
#   mul => mul
#   mul_1 => mul_1
#   o_t => sigmoid_2
#   tanh_1 => tanh_1
# Graph fragment:
#   %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_4,), kwargs = {})
#   %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_5,), kwargs = {})
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_6,), kwargs = {})
#   %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_7,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %squeeze), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
#   %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
#   %mul_2 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {})
#   %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
#   %mul_71 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_18), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
    tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
    tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
    tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
    tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
    tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
    tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
    tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr4 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp16 + tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = libdevice.tanh(tmp22)
    tmp26 = tmp24 + tmp25
    tmp29 = tmp27 + tmp28
    tmp30 = tmp26 + tmp29
    tmp31 = tl.sigmoid(tmp30)
    tmp33 = tmp31 * tmp32
    tmp34 = tmp7 * tmp23
    tmp35 = tmp33 + tmp34
    tmp36 = 1.0
    tmp37 = tmp36 - tmp31
    tmp38 = tmp31 * tmp37
    tmp39 = libdevice.tanh(tmp35)
    tmp40 = tmp15 * tmp39
    tl.store(out_ptr0 + (x2), tmp7, xmask)
    tl.store(out_ptr1 + (x2), tmp15, xmask)
    tl.store(out_ptr2 + (x2), tmp23, xmask)
    tl.store(out_ptr3 + (x2), tmp35, xmask)
    tl.store(out_ptr4 + (x2), tmp38, xmask)
    tl.store(out_ptr5 + (x2), tmp40, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/tl/ctlt2p573ni7yx4xldsz64sjwqkudoxkn6d6yvb2r54qxlmxds5p.py
# Topologically Sorted Source Nodes: [i_t_1, f_t_1, g_t_1, o_t_1, mul_3, mul_4, c_t_1, tanh_3, h_t_1], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
# Source node to ATen node mapping:
#   c_t_1 => add_3
#   f_t_1 => sigmoid_4
#   g_t_1 => tanh_2
#   h_t_1 => mul_5
#   i_t_1 => sigmoid_3
#   mul_3 => mul_3
#   mul_4 => mul_4
#   o_t_1 => sigmoid_5
#   tanh_3 => tanh_3
# Graph fragment:
#   %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_8,), kwargs = {})
#   %sigmoid_4 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_9,), kwargs = {})
#   %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_10,), kwargs = {})
#   %sigmoid_5 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_11,), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_4, %add_1), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %tanh_2), kwargs = {})
#   %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
#   %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {})
#   %mul_5 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_5, %tanh_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
    tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
    tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
    tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
    tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
    tmp20 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
    tmp25 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
    tmp28 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr4 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp16 + tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = tl.sigmoid(tmp22)
    tmp26 = tmp24 + tmp25
    tmp29 = tmp27 + tmp28
    tmp30 = tmp26 + tmp29
    tmp31 = libdevice.tanh(tmp30)
    tmp33 = tmp15 * tmp32
    tmp34 = tmp7 * tmp31
    tmp35 = tmp33 + tmp34
    tmp36 = libdevice.tanh(tmp35)
    tmp37 = tmp23 * tmp36
    tl.store(out_ptr0 + (x2), tmp7, xmask)
    tl.store(out_ptr1 + (x2), tmp15, xmask)
    tl.store(out_ptr2 + (x2), tmp23, xmask)
    tl.store(out_ptr3 + (x2), tmp31, xmask)
    tl.store(out_ptr4 + (x2), tmp35, xmask)
    tl.store(out_ptr5 + (x2), tmp37, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/ks/cksi7m4m7yxvsvd2ce7ftnos6exaibxyudnraey6nxajdfw246hj.py
# Topologically Sorted Source Nodes: [i_t_3, f_t_3, g_t_3, mul_9, mul_10, c_t_3, tanh_7], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
# Source node to ATen node mapping:
#   c_t_3 => add_7
#   f_t_3 => sigmoid_10
#   g_t_3 => tanh_6
#   i_t_3 => sigmoid_9
#   mul_10 => mul_10
#   mul_9 => mul_9
#   tanh_7 => tanh_7
# Graph fragment:
#   %sigmoid_9 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_16,), kwargs = {})
#   %sigmoid_10 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_17,), kwargs = {})
#   %tanh_6 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_18,), kwargs = {})
#   %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_10, %add_5), kwargs = {})
#   %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_9, %tanh_6), kwargs = {})
#   %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {})
#   %tanh_7 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
    tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
    tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
    tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
    tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
    tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr4 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp16 + tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = libdevice.tanh(tmp22)
    tmp25 = tmp15 * tmp24
    tmp26 = tmp7 * tmp23
    tmp27 = tmp25 + tmp26
    tmp28 = libdevice.tanh(tmp27)
    tl.store(out_ptr0 + (x2), tmp7, xmask)
    tl.store(out_ptr1 + (x2), tmp15, xmask)
    tl.store(out_ptr2 + (x2), tmp23, xmask)
    tl.store(out_ptr3 + (x2), tmp28, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/o5/co5z4awunowwwrh5of536l26ozrjuwdhph5f2zpgdrfapcc3dduk.py
# Topologically Sorted Source Nodes: [o_t_3], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
#   o_t_3 => sigmoid_11
# Graph fragment:
#   %sigmoid_11 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_19,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
    tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
    tmp4 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_8/inductor_cache/vm/cvmcreohmmmgtc2sngri4exy6k5t5hutelgjbv763z3d5cnripkm.py
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack]
# Source node to ATen node mapping:
#   h_n => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %mul_5, %mul_8, %mul_11],), kwargs = {})
triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tmp17 = tl.full([1], 16, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
    tmp20 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp16, tmp21, tmp22)
    tmp24 = tl.where(tmp14, tmp15, tmp23)
    tmp25 = tl.where(tmp9, tmp10, tmp24)
    tmp26 = tl.where(tmp4, tmp5, tmp25)
    tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (16, 4), (4, 1))
    assert_size_stride(primals_3, (16, ), (1, ))
    assert_size_stride(primals_4, (16, 4), (4, 1))
    assert_size_stride(primals_5, (16, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 1, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [h_0s], Original ATen: [aten.zeros]
        stream0 = get_raw_stream(0)
        triton_poi_fused_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0)
        buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward]
        triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1.run(buf1, primals_3, buf2, primals_5, buf0, buf3, buf5, buf4, buf6, buf32, buf7, 16, grid=grid(16), stream=stream0)
        buf8 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf8)
        buf9 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf7, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [i_t_1, f_t_1, g_t_1, o_t_1, mul_3, mul_4, c_t_1, tanh_3, h_t_1], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
        triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf8, primals_3, buf9, primals_5, buf6, buf10, buf11, buf13, buf12, buf14, buf15, 16, grid=grid(16), stream=stream0)
        buf16 = buf9; del buf9  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf16)
        buf17 = buf8; del buf8  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf15, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf17)
        buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [i_t_2, f_t_2, g_t_2, o_t_2, mul_6, mul_7, c_t_2, tanh_5, h_t_2], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
        triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf16, primals_3, buf17, primals_5, buf14, buf18, buf19, buf21, buf20, buf22, buf23, 16, grid=grid(16), stream=stream0)
        buf24 = buf17; del buf17  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf24)
        del primals_2
        buf25 = buf16; del buf16  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf23, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf25)
        buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [i_t_3, f_t_3, g_t_3, mul_9, mul_10, c_t_3, tanh_7], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
        triton_poi_fused_add_mul_sigmoid_tanh_3.run(buf24, primals_3, buf25, primals_5, buf22, buf26, buf27, buf28, buf30, 16, grid=grid(16), stream=stream0)
        buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [o_t_3], Original ATen: [aten.sigmoid]
        triton_poi_fused_sigmoid_4.run(buf24, primals_3, buf25, primals_5, buf29, 16, grid=grid(16), stream=stream0)
        del buf24
        del primals_3
        del primals_5
        buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25  # reuse
        # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack]
        triton_poi_fused_stack_5.run(buf7, buf15, buf23, buf29, buf30, buf31, 64, grid=grid(64), stream=stream0)
    return (reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf3, buf4, buf5, buf6, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf19, buf20, buf21, buf22, buf23, buf26, buf27, buf28, buf29, buf30, primals_4, buf32, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
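A layout note, with a minimal sketch (hypothetical tensor names, not part of the record): the fused kernels above load the 16-wide gate buffer at column offsets 0, 4, 8 and 12 because the DPLSTMCell code that follows produces pre-activations for the input, forget, cell and output gates via torch.split(gates, hidden_size, 1) with hidden_size = 4.

# Sketch of the gate layout the fused kernels assume (B = batch, H =
# hidden_size = 4): a (B, 4*H) pre-activation buffer splits into four
# (B, H) chunks, which is why the kernels index columns 0, 4, 8, 12.
import torch

B, H = 4, 4
gates = torch.randn(B, 4 * H)
i_in, f_in, g_in, o_in = torch.split(gates, H, dim=1)
assert torch.equal(i_in, gates[:, 0:4])
assert torch.equal(o_in, gates[:, 12:16])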
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Tuple
from typing import List
from typing import Optional
from typing import Dict
from typing import Union
from torch.nn.modules.module import _IncompatibleKeys


def filter_out_old_keys(self, state_dict, prefix, local_metadata):
    new_state_dict = {param_name: param_value for param_name, param_value in
        state_dict.items() if param_name not in self.old_to_new}
    return new_state_dict


class LSTMLinear(nn.Linear):
    """
    This function is the same as a nn.Linear layer, except that in the backward pass
    the grad_samples get accumulated (instead of being concatenated as in the standard
    nn.Linear).
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias:
        'bool'=True):
        super().__init__(in_features, out_features, bias)


class DPLSTMCell(nn.Module):
    """
    Internal-only class. Implements *one* step of LSTM so that a LSTM layer can be seen
    as repeated applications of this class.
    """

    def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias)
        self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias)
        self.reset_parameters()

    def reset_parameters(self):
        """
        Resets parameters by initializing them from a uniform distribution.
        """
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            nn.init.uniform_(weight, -stdv, stdv)

    def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev:
        'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
        gates = self.ih(x) + self.hh(h_prev)
        i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
            self.hidden_size, 1)
        i_t = torch.sigmoid(i_t_input)
        f_t = torch.sigmoid(f_t_input)
        g_t = torch.tanh(g_t_input)
        o_t = torch.sigmoid(o_t_input)
        c_t = f_t * c_prev + i_t * g_t
        h_t = o_t * torch.tanh(c_t)
        return h_t, c_t


class DPLSTMLayer(nn.Module):
    """
    Implements *one* layer of LSTM in a way amenable to differential privacy.
    We don't expect you to use this directly: use DPLSTM instead :)
    """

    def __init__(self, input_size: 'int', hidden_size: 'int', bias:
        'bool', dropout: 'float', reverse: 'bool'=False):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.dropout = dropout
        self.reverse = reverse
        self.cell = DPLSTMCell(input_size=input_size, hidden_size=
            hidden_size, bias=bias)
        self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None

    def forward(self, x: 'torch.Tensor', state_init:
        'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[
        torch.Tensor, torch.Tensor]]:
        """
        Implements the forward pass of the DPLSTMLayer when a sequence is given in input.

        Args:
            x: Input sequence to the DPLSTMCell of shape ``[T, B, D]``.
            state_init: Initial state of the LSTMCell as a tuple ``(h_0, c_0)``
                where ``h_0`` is the initial hidden state and ``c_0`` is the
                initial cell state of the DPLSTMCell

        Returns:
            ``output, (h_n, c_n)`` where:
            - ``output`` is of shape ``[T, B, H]`` and is a tensor containing the output
              features (``h_t``) from the last layer of the DPLSTMCell for each timestep ``t``.
            - ``h_n`` is of shape ``[B, H]`` and is a tensor containing the hidden state for ``t = T``.
            - ``c_n`` is of shape ``[B, H]`` and is a tensor containing the cell state for ``t = T``.
        """
        seq_length, _batch_sz, _ = x.shape
        if self.reverse:
            x = x.flip(0)
        x = torch.unbind(x, dim=0)
        h_0, c_0 = state_init
        h_n = [h_0]
        c_n = [c_0]
        for t in range(seq_length):
            h_next, c_next = self.cell(x[t], h_n[t], c_n[t])
            if self.dropout:
                h_next = self.dropout_layer(h_next)
            h_n.append(h_next)
            c_n.append(c_next)
        h_n = torch.stack(h_n[1:], dim=0)
        return h_n.flip(0) if self.reverse else h_n, (h_n[-1], c_n[-1])


class BidirectionalDPLSTMLayer(nn.Module):
    """
    Implements *one* layer of Bidirectional LSTM in a way amenable to differential privacy.
    We don't expect you to use this directly: use DPLSTM instead :)
    """

    def __init__(self, input_size: 'int', hidden_size: 'int', bias:
        'bool', dropout: 'float'):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.dropout = dropout
        self.forward_layer = DPLSTMLayer(input_size=input_size, hidden_size
            =hidden_size, bias=bias, dropout=dropout, reverse=False)
        self.reverse_layer = DPLSTMLayer(input_size=input_size, hidden_size
            =hidden_size, bias=bias, dropout=dropout, reverse=True)

    def forward(self, x: 'torch.Tensor', state_init:
        'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[
        torch.Tensor, torch.Tensor]]:
        """
        Implements the forward pass of the DPLSTM when a sequence is input.

        Dimensions as follows:
            - B: Batch size
            - T: Sequence length
            - D: LSTM input hidden size (eg from a word embedding)
            - H: LSTM output hidden size
            - P: number of directions (2 if bidirectional, else 1)

        Args:
            x: Input sequence to the DPLSTM of shape ``[T, B, D]``
            state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where:
                - ``h_0`` of shape ``[P, B, H]`` contains the initial hidden state
                - ``c_0`` of shape ``[P, B, H]`` contains the initial cell state
                This argument can be (and defaults to) None, in which case zero tensors will be used.

        Returns:
            ``output, (h_n, c_n)`` where:
            - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output
              features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``.
            - ``h_n`` is of shape ``[P, B, H]`` and contains the hidden state for ``t = T``.
            - ``c_n`` is of shape ``[P, B, H]`` and contains the cell state for ``t = T``.
        """
        h0, c0 = state_init
        h0_f, h0_r = h0.unbind(0)
        c0_f, c0_r = c0.unbind(0)
        out_f, (h_f, c_f) = self.forward_layer(x, (h0_f, c0_f))
        out_r, (h_r, c_r) = self.reverse_layer(x, (h0_r, c0_r))
        out = torch.cat([out_f, out_r], dim=-1)
        h = torch.stack([h_f, h_r], dim=0)
        c = torch.stack([c_f, c_r], dim=0)
        return out, (h, c)


class ParamRenamedModule(nn.Module):
    """
    This class defines a nn.Module whose parameters are renamed. This is useful when you
    want to reimplement a layer but make sure its state_dict and list of parameters are
    exactly the same as another reference layer so that you can have a drop-in
    replacement that does not depend on how your layer is actually implemented. In Opacus,
    this is used for DPLSTM, where our implementation leverages submodules and requires
    alignment to the state_dict of nn.LSTM.
    """

    def __init__(self, rename_map: 'Dict[str, str]'):
        """
        Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever
        you need to rename your model's state.

        Args:
            rename_map: mapping from old name -> new name for each parameter you want
                renamed. Note that this must be a 1:1 mapping!
        """
        super().__init__()
        self.old_to_new = rename_map
        self.new_to_old = {v: k for k, v in rename_map.items()}
        self._register_state_dict_hook(filter_out_old_keys)

    def _register_renamed_parameters(self):
        """
        Internal function. This function simply registers parameters under their new name.
        They will automatically mask their duplicates coming from submodules. This trick
        works because self.parameters() proceeds recursively from the top, going into
        submodules after processing items at the current level, and will not return
        duplicates.
        """
        for old_name, param in super().named_parameters():
            if old_name in self.old_to_new:
                new_name = self.old_to_new[old_name]
                self.register_parameter(new_name, param)

    def __setattr__(self, name: 'str', value: 'Union[Tensor, nn.Module]'
        ) ->None:
        """
        Whenever you set an attribute, eg `self.linear`, this is called to actually
        register it in any nn.Module. We rely on the masking trick explained in the docs
        for ``_register_renamed_parameters`` to make sure we replace things only once.
        If a new parameter in the rename list is detected, we rename and mask it so next
        time this is called we will no longer find it.
        """
        super().__setattr__(name, value)
        try:
            self._register_renamed_parameters()
        except AttributeError:
            # torch's ModuleAttributeError subclasses AttributeError, so this
            # catch covers modules whose parameters are not yet initialized.
            pass

    def load_state_dict(self, state_dict: 'Dict[str, Tensor]', strict:
        'bool'=True):
        """
        Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys.
        """
        missing_keys, unexpected_keys = super().load_state_dict(state_dict,
            strict=False)
        missing_keys = [k for k in missing_keys if k not in self.old_to_new]
        if strict:
            error_msgs = []
            if len(unexpected_keys) > 0:
                error_msgs.insert(0,
                    'Unexpected key(s) in state_dict: {}. '.format(', '.
                    join('"{}"'.format(k) for k in unexpected_keys)))
            if len(missing_keys) > 0:
                error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '.
                    format(', '.join('"{}"'.format(k) for k in missing_keys)))
            if len(error_msgs) > 0:
                raise RuntimeError(
                    'Error(s) in loading state_dict for {}:\n\t{}'.format(
                    self.__class__.__name__, '\n\t'.join(error_msgs)))
        return _IncompatibleKeys(missing_keys, unexpected_keys)


class DPLSTM(ParamRenamedModule):
    """
    DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module. Its state_dict
    matches that of nn.LSTM exactly, so that after training it can be exported and loaded
    by an nn.LSTM for inference. Refer to nn.LSTM's documentation for all parameters and
    inputs.
    """

    def __init__(self, input_size: 'int', hidden_size: 'int', num_layers:
        'int'=1, bias: 'bool'=True, batch_first: 'bool'=False, dropout:
        'float'=0, bidirectional: 'bool'=False):
        rename_dict = self._make_rename_dict(num_layers, bias, bidirectional)
        super().__init__(rename_dict)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.num_directions = 2 if self.bidirectional else 1
        LayerClass = BidirectionalDPLSTMLayer if bidirectional else DPLSTMLayer
        self.layers = nn.ModuleList([LayerClass(input_size=self.input_size if
            i == 0 else self.hidden_size * self.num_directions, hidden_size
            =self.hidden_size, bias=self.bias, dropout=self.dropout if i <
            self.num_layers - 1 else 0) for i in range(num_layers)])

    def forward(self, x: 'torch.Tensor', state_init:
        'Optional[Tuple[torch.Tensor, torch.Tensor]]'=None) ->Tuple[torch.
        Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        Implements the forward pass of the DPLSTM when a sequence is input.

        Dimensions as follows:
            - B: Batch size
            - T: Sequence length
            - D: LSTM input hidden size (eg from a word embedding)
            - H: LSTM output hidden size
            - L: number of layers in the LSTM
            - P: number of directions (2 if bidirectional, else 1)

        Args:
            x: Input sequence to the DPLSTM of shape ``[T, B, D]``
            state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where:
                - ``h_0`` of shape ``[L*P, B, H]`` contains the initial hidden state
                - ``c_0`` of shape ``[L*P, B, H]`` contains the initial cell state
                This argument can be (and defaults to) None, in which case zero tensors
                will be used.

        Returns:
            ``output, (h_n, c_n)`` where:
            - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the
              output features (``h_t``) from the last layer of the DPLSTM for each
              timestep ``t``.
            - ``h_n`` is of shape ``[L * P, B, H]`` and contains the hidden state for ``t = T``.
            - ``c_n`` is of shape ``[L * P, B, H]`` and contains the cell state for ``t = T``.
        """
        x = self._rearrange_batch_dim(x)
        _T, B, _D = x.shape
        L = self.num_layers
        P = 2 if self.bidirectional else 1
        H = self.hidden_size
        h_0s, c_0s = state_init or (None, None)
        if h_0s is None:
            h_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype,
                device=x[0].device)
        else:
            h_0s = h_0s.reshape([L, P, B, H])
        if c_0s is None:
            c_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype,
                device=x[0].device)
        else:
            c_0s = c_0s.reshape([L, P, B, H])
        hs: 'List[torch.Tensor]' = []
        cs: 'List[torch.Tensor]' = []
        for layer, h0, c0 in zip(self.layers, h_0s, c_0s):
            if not self.bidirectional:
                h0 = h0.squeeze()
                c0 = c0.squeeze()
            x, (h, c) = layer(x, (h0, c0))
            if not self.bidirectional:
                h = h.unsqueeze(0)
                c = c.unsqueeze(0)
            hs.append(h)
            cs.append(c)
        hs = torch.cat(hs, dim=0)
        cs = torch.cat(cs, dim=0)
        out = self._rearrange_batch_dim(x)
        return out, (hs, cs)

    def _rearrange_batch_dim(self, x: 'torch.Tensor') ->torch.Tensor:
        if self.batch_first:
            x = x.transpose(0, 1)
        return x

    def __repr__(self):
        s = f'DPLSTM({self.input_size}, {self.hidden_size}, bias={self.bias}'
        if self.batch_first:
            s += f', batch_first={self.batch_first}'
        if self.num_layers > 1:
            s += f', num_layers={self.num_layers}'
        if self.dropout:
            s += f', dropout={self.dropout}'
        if self.bidirectional:
            s += f', bidirectional={self.bidirectional}'
        return s

    def _make_rename_dict(self, num_layers, bias, bidirectional):
        """
        Programmatically constructs a dictionary old_name -> new_name to align with the
        param names used in ``torch.nn.LSTM``.
        """
        d = {}
        # Only include 'bias' components when bias=True (the original
        # expression appended an empty list, producing a bogus entry).
        components = ['weight', 'bias'] if bias else ['weight']
        matrices = ['ih', 'hh']
        for i in range(num_layers):
            for c in components:
                for m in matrices:
                    nn_name = f'{c}_{m}_l{i}'
                    if bidirectional:
                        d[f'layers.{i}.forward_layer.cell.{m}.{c}'] = nn_name
                        d[f'layers.{i}.reverse_layer.cell.{m}.{c}'
                            ] = nn_name + '_reverse'
                    else:
                        d[f'layers.{i}.cell.{m}.{c}'] = nn_name
        return d


class DPSLTMAdapter(nn.Module):
    """
    Adapter for DPLSTM. LSTM returns a tuple, but our testing tools need the model to
    return a single tensor in output. We do this adaption here.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.dplstm = DPLSTM(*args, **kwargs)

    def forward(self, x):
        out, _rest = self.dplstm(x)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
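A minimal usage sketch (shapes taken from get_inputs/get_init_inputs above; runs on CPU, no CUDA needed): the adapter unwraps DPLSTM's ``(output, (h_n, c_n))`` tuple and returns only the ``[T, B, H]`` output tensor.

# Sketch only: construct the adapter with the record's init inputs and
# check the output shape matches [T, B, H] = [4, 4, 4].
import torch

model = DPSLTMAdapter(input_size=4, hidden_size=4)
x = torch.rand(4, 4, 4)  # [T, B, D]
out = model(x)
assert out.shape == (4, 4, 4)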
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Tuple
from typing import List
from typing import Optional
from typing import Dict
from typing import Union
from torch.nn.modules.module import _IncompatibleKeys

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
    out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr4 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp16 + tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = libdevice.tanh(tmp22)
    tmp26 = tmp24 + tmp25
    tmp29 = tmp27 + tmp28
    tmp30 = tmp26 + tmp29
    tmp31 = tl.sigmoid(tmp30)
    tmp33 = tmp31 * tmp32
    tmp34 = tmp7 * tmp23
    tmp35 = tmp33 + tmp34
    tmp36 = 1.0
    tmp37 = tmp36 - tmp31
    tmp38 = tmp31 * tmp37
    tmp39 = libdevice.tanh(tmp35)
    tmp40 = tmp15 * tmp39
    tl.store(out_ptr0 + x2, tmp7, xmask)
    tl.store(out_ptr1 + x2, tmp15, xmask)
    tl.store(out_ptr2 + x2, tmp23, xmask)
    tl.store(out_ptr3 + x2, tmp35, xmask)
    tl.store(out_ptr4 + x2, tmp38, xmask)
    tl.store(out_ptr5 + x2, tmp40, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
    out_ptr5, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp20 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp25 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp28 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr4 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp16 + tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = tl.sigmoid(tmp22)
    tmp26 = tmp24 + tmp25
    tmp29 = tmp27 + tmp28
    tmp30 = tmp26 + tmp29
    tmp31 = libdevice.tanh(tmp30)
    tmp33 = tmp15 * tmp32
    tmp34 = tmp7 * tmp31
    tmp35 = tmp33 + tmp34
    tmp36 = libdevice.tanh(tmp35)
    tmp37 = tmp23 * tmp36
    tl.store(out_ptr0 + x2, tmp7, xmask)
    tl.store(out_ptr1 + x2, tmp15, xmask)
    tl.store(out_ptr2 + x2, tmp23, xmask)
    tl.store(out_ptr3 + x2, tmp31, xmask)
    tl.store(out_ptr4 + x2, tmp35, xmask)
    tl.store(out_ptr5 + x2, tmp37, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr4 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp16 + tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = libdevice.tanh(tmp22)
    tmp25 = tmp15 * tmp24
    tmp26 = tmp7 * tmp23
    tmp27 = tmp25 + tmp26
    tmp28 = libdevice.tanh(tmp27)
    tl.store(out_ptr0 + x2, tmp7, xmask)
    tl.store(out_ptr1 + x2, tmp15, xmask)
    tl.store(out_ptr2 + x2, tmp23, xmask)
    tl.store(out_ptr3 + x2, tmp28, xmask)


@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tl.store(out_ptr0 + x2, tmp7, xmask)


@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp16, tmp21, tmp22)
    tmp24 = tl.where(tmp14, tmp15, tmp23)
    tmp25 = tl.where(tmp9, tmp10, tmp24)
    tmp26 = tl.where(tmp4, tmp5, tmp25)
    tl.store(out_ptr0 + x2, tmp26, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (16, 4), (4, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (16, 4), (4, 1))
    assert_size_stride(primals_5, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 1, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](
            buf1, primals_3, buf2, primals_5, buf0, buf3, buf5, buf4, buf6,
            buf32, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = buf2
        del buf2
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
            reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf8)
        buf9 = buf1
        del buf1
        extern_kernels.mm(buf7, reinterpret_tensor(primals_4, (4, 16), (1,
            4), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf13 = empty_strided_cuda((4,
4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_3, buf9, primals_5, buf6, buf10, buf11, buf13, buf12, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf9 del buf9 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf16) buf17 = buf8 del buf8 extern_kernels.mm(buf15, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf16, primals_3, buf17, primals_5, buf14, buf18, buf19, buf21, buf20, buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = buf17 del buf17 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf24) del primals_2 buf25 = buf16 del buf16 extern_kernels.mm(buf23, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf25) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf24, primals_3, buf25, primals_5, buf22, buf26, buf27, buf28, buf30, 16, XBLOCK =16, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_sigmoid_4[grid(16)](buf24, primals_3, buf25, primals_5, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf24 del primals_3 del primals_5 buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0) del buf25 triton_poi_fused_stack_5[grid(64)](buf7, buf15, buf23, buf29, buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor( primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf3, buf4, buf5, buf6, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf19, buf20, buf21, buf22, buf23, buf26, buf27, buf28, buf29, buf30, primals_4, buf32) def filter_out_old_keys(self, state_dict, prefix, local_metadata): new_state_dict = {param_name: param_value for param_name, param_value in state_dict.items() if param_name not in self.old_to_new} return new_state_dict class LSTMLinear(nn.Linear): """ This function is the same as a nn.Linear layer, except that in the backward pass the grad_samples get accumulated (instead of being concatenated as in the standard nn.Linear) """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'bool'=True): super().__init__(in_features, out_features, bias) class DPLSTMCell(nn.Module): """ Internal-only class. Implements *one* step of LSTM so that a LSTM layer can be seen as repeated applications of this class. 
""" def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias) self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias) self.reset_parameters() def reset_parameters(self): """ Resets parameters by initializing them from an uniform distribution. """ stdv = 1.0 / math.sqrt(self.hidden_size) for weight in self.parameters(): nn.init.uniform_(weight, -stdv, stdv) def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]: gates = self.ih(x) + self.hh(h_prev) i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates, self.hidden_size, 1) i_t = torch.sigmoid(i_t_input) f_t = torch.sigmoid(f_t_input) g_t = torch.tanh(g_t_input) o_t = torch.sigmoid(o_t_input) c_t = f_t * c_prev + i_t * g_t h_t = o_t * torch.tanh(c_t) return h_t, c_t class DPLSTMLayer(nn.Module): """ Implements *one* layer of LSTM in a way amenable to differential privacy. We don't expect you to use this directly: use DPLSTM instead :) """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool', dropout: 'float', reverse: 'bool'=False): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.dropout = dropout self.reverse = reverse self.cell = DPLSTMCell(input_size=input_size, hidden_size= hidden_size, bias=bias) self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None def forward(self, x: 'torch.Tensor', state_init: 'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[ torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTMLayer when a sequence is given in input. Args: x: Input sequence to the DPLSTMCell of shape ``[T, B, D]``. state_init: Initial state of the LSTMCell as a tuple ``(h_0, c_0)`` where ``h_0`` is the initial hidden state and ``c_0`` is the initial cell state of the DPLSTMCell Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTMCell for each timestep ``t``. - ``h_n`` is of shape ``[B, H]`` and is a tensor containing the hidden state for ``t = T``. - ``c_n`` is of shape ``[B, H]`` tensor containing the cell state for ``t = T``. """ seq_length, _batch_sz, _ = x.shape if self.reverse: x = x.flip(0) x = torch.unbind(x, dim=0) h_0, c_0 = state_init h_n = [h_0] c_n = [c_0] for t in range(seq_length): h_next, c_next = self.cell(x[t], h_n[t], c_n[t]) if self.dropout: h_next = self.dropout_layer(h_next) h_n.append(h_next) c_n.append(c_next) h_n = torch.stack(h_n[1:], dim=0) return h_n.flip(0) if self.reverse else h_n, (h_n[-1], c_n[-1]) class BidirectionalDPLSTMLayer(nn.Module): """ Implements *one* layer of Bidirectional LSTM in a way amenable to differential privacy. 
We don't expect you to use this directly: use DPLSTM instead :) """ def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool', dropout: 'float'): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.dropout = dropout self.forward_layer = DPLSTMLayer(input_size=input_size, hidden_size =hidden_size, bias=bias, dropout=dropout, reverse=False) self.reverse_layer = DPLSTMLayer(input_size=input_size, hidden_size =hidden_size, bias=bias, dropout=dropout, reverse=True) def forward(self, x: 'torch.Tensor', state_init: 'Tuple[torch.Tensor, torch.Tensor]') ->Tuple[torch.Tensor, Tuple[ torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTM when a sequence is input. Dimensions as follows: - B: Batch size - T: Sequence length - D: LSTM input hidden size (e.g. from a word embedding) - H: LSTM output hidden size - P: number of directions (2 if bidirectional, else 1) Args: x: Input sequence to the DPLSTM of shape ``[T, B, D]`` state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where: - h_0 of shape ``[P, B, H]`` contains the initial hidden state - c_0 of shape ``[P, B, H]`` contains the initial cell state This argument can be (and defaults to) None, in which case zero tensors will be used. Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``. - ``h_n`` is of shape ``[P, B, H]`` and contains the hidden state for ``t = T``. - ``c_n`` is of shape ``[P, B, H]`` and contains the cell state for ``t = T``. """ h0, c0 = state_init h0_f, h0_r = h0.unbind(0) c0_f, c0_r = c0.unbind(0) out_f, (h_f, c_f) = self.forward_layer(x, (h0_f, c0_f)) out_r, (h_r, c_r) = self.reverse_layer(x, (h0_r, c0_r)) out = torch.cat([out_f, out_r], dim=-1) h = torch.stack([h_f, h_r], dim=0) c = torch.stack([c_f, c_r], dim=0) return out, (h, c) class ParamRenamedModule(nn.Module): """ This class defines an nn.Module whose parameters are renamed. This is useful when you want to reimplement a layer but make sure its state_dict and list of parameters are exactly the same as another reference layer so that you can have a drop-in replacement that does not depend on how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our implementation leverages submodules and requires alignment to the state_dict of nn.LSTM. """ def __init__(self, rename_map: 'Dict[str, str]'): """ Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need to rename your model's state. Args: rename_map: mapping from old name -> new name for each parameter you want renamed. Note that this must be a 1:1 mapping! """ super().__init__() self.old_to_new = rename_map self.new_to_old = {v: k for k, v in rename_map.items()} self._register_state_dict_hook(filter_out_old_keys) def _register_renamed_parameters(self): """ Internal function. This function simply registers parameters under their new name. They will automatically mask their duplicates coming from submodules. This trick works because self.parameters() proceeds recursively from the top, going into submodules after processing items at the current level, and will not return duplicates.
""" for old_name, param in super().named_parameters(): if old_name in self.old_to_new: new_name = self.old_to_new[old_name] self.register_parameter(new_name, param) def __setattr__(self, name: 'str', value: 'Union[Tensor, nn.Module]' ) ->None: """ Whenever you set an attribute, eg `self.linear`, this is called to actually register it in any nn.Module. We rely on the masking trick explained in the docs for ``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter in the rename list is detected, we rename and mask it so next time this is called we will no longer find it. """ super().__setattr__(name, value) try: self._register_renamed_parameters() except ModuleAttributeError: pass def load_state_dict(self, state_dict: 'Dict[str, Tensor]', strict: 'bool'=True): """ Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys. """ missing_keys, unexpected_keys = super().load_state_dict(state_dict, strict=False) missing_keys = [k for k in missing_keys if k not in self.old_to_new] if strict: error_msgs = [] if len(unexpected_keys) > 0: error_msgs.insert(0, 'Unexpected key(s) in state_dict: {}. '.format(', '. join('"{}"'.format(k) for k in unexpected_keys))) if len(missing_keys) > 0: error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '. format(', '.join('"{}"'.format(k) for k in missing_keys))) if len(error_msgs) > 0: raise RuntimeError( 'Error(s) in loading state_dict for {}:\n\t{}'.format( self.__class__.__name__, '\n\t'.join(error_msgs))) return _IncompatibleKeys(missing_keys, unexpected_keys) class DPLSTM(ParamRenamedModule): """ DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module. Its state_dict matches that of nn.LSTM exactly, so that after training it can be exported and loaded by an nn.LSTM for inference. Refer to nn.LSTM's documentation for all parameters and inputs. """ def __init__(self, input_size: 'int', hidden_size: 'int', num_layers: 'int'=1, bias: 'bool'=True, batch_first: 'bool'=False, dropout: 'float'=0, bidirectional: 'bool'=False): rename_dict = self._make_rename_dict(num_layers, bias, bidirectional) super().__init__(rename_dict) self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.bidirectional = bidirectional self.num_directions = 2 if self.bidirectional else 1 LayerClass = BidirectionalDPLSTMLayer if bidirectional else DPLSTMLayer self.layers = nn.ModuleList([LayerClass(input_size=self.input_size if i == 0 else self.hidden_size * self.num_directions, hidden_size =self.hidden_size, bias=self.bias, dropout=self.dropout if i < self.num_layers - 1 else 0) for i in range(num_layers)]) def forward(self, x: 'torch.Tensor', state_init: 'Optional[Tuple[torch.Tensor, torch.Tensor]]'=None) ->Tuple[torch. Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ Implements the forward pass of the DPLSTM when a sequence is input. Dimensions as follows: - B: Batch size - T: Sequence length - D: LSTM input hidden size (eg from a word embedding) - H: LSTM output hidden size - L: number of layers in the LSTM - P: number of directions (2 if bidirectional, else 1) Args: x: Input sequence to the DPLSTM of shape ``[T, B, D]`` state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where: - h_0 of shape ``[L*P, B, H] contains the initial hidden state - c_0 of shape ``[L*P, B, H] contains the initial cell state This argument can be (and defaults to) None, in which case zero tensors will be used. 
Returns: ``output, (h_n, c_n)`` where: - ``output`` is of shape ``[T, B, H * P]`` and is a tensor containing the output features (``h_t``) from the last layer of the DPLSTM for each timestep ``t``. - ``h_n`` is of shape ``[L * P, B, H]`` and contains the hidden state for ``t = T``. - ``c_n`` is of shape ``[L * P, B, H]`` and contains the cell state for ``t = T``. """ x = self._rearrange_batch_dim(x) _T, B, _D = x.shape L = self.num_layers P = 2 if self.bidirectional else 1 H = self.hidden_size h_0s, c_0s = state_init or (None, None) if h_0s is None: h_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype, device=x[0].device) else: h_0s = h_0s.reshape([L, P, B, H]) if c_0s is None: c_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype, device=x[0].device) else: c_0s = c_0s.reshape([L, P, B, H]) hs: 'List[torch.Tensor]' = [] cs: 'List[torch.Tensor]' = [] for layer, h0, c0 in zip(self.layers, h_0s, c_0s): if not self.bidirectional: h0 = h0.squeeze() c0 = c0.squeeze() x, (h, c) = layer(x, (h0, c0)) if not self.bidirectional: h = h.unsqueeze(0) c = c.unsqueeze(0) hs.append(h) cs.append(c) hs = torch.cat(hs, dim=0) cs = torch.cat(cs, dim=0) out = self._rearrange_batch_dim(x) return out, (hs, cs) def _rearrange_batch_dim(self, x: 'torch.Tensor') ->torch.Tensor: if self.batch_first: x = x.transpose(0, 1) return x def __repr__(self): s = f'DPLSTM({self.input_size}, {self.hidden_size}, bias={self.bias}' if self.batch_first: s += f', batch_first={self.batch_first}' if self.num_layers > 1: s += f', num_layers={self.num_layers}' if self.dropout: s += f', dropout={self.dropout}' if self.bidirectional: s += f', bidirectional={self.bidirectional}' return s + ')' def _make_rename_dict(self, num_layers, bias, bidirectional): """ Programmatically constructs a dictionary old_name -> new_name to align with the param names used in ``torch.nn.LSTM``. """ d = {} components = ['weight', 'bias'] if bias else ['weight'] matrices = ['ih', 'hh'] for i in range(num_layers): for c in components: for m in matrices: nn_name = f'{c}_{m}_l{i}' if bidirectional: d[f'layers.{i}.forward_layer.cell.{m}.{c}'] = nn_name d[f'layers.{i}.reverse_layer.cell.{m}.{c}' ] = nn_name + '_reverse' else: d[f'layers.{i}.cell.{m}.{c}'] = nn_name return d class DPSLTMAdapterNew(nn.Module): """ Adapter for DPLSTM. LSTM returns a tuple, but our testing tools need the model to return a single tensor as output. We do this adaptation here. """ def __init__(self, *args, **kwargs): super().__init__() self.dplstm = DPLSTM(*args, **kwargs) def forward(self, input_0): primals_2 = self.dplstm.weight_ih_l0 primals_3 = self.dplstm.bias_ih_l0 primals_4 = self.dplstm.weight_hh_l0 primals_5 = self.dplstm.bias_hh_l0 primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
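The rename machinery above exists so a trained DPLSTM checkpoint can be loaded straight into ``torch.nn.LSTM``. A minimal parity sketch (hypothetical check, not part of the repo; assumes the classes defined above and CPU tensors):

import torch
import torch.nn as nn

# Hypothetical check: DPLSTM's state_dict keys are renamed to match nn.LSTM
# (weight_ih_l0, weight_hh_l0, ...), so nn.LSTM should load them directly.
dp = DPLSTM(input_size=4, hidden_size=4)
ref = nn.LSTM(input_size=4, hidden_size=4)
ref.load_state_dict(dp.state_dict())

x = torch.randn(5, 2, 4)  # [T, B, D]; B > 1 keeps squeeze() on h0/c0 from eating the batch dim
out_dp, (h_dp, c_dp) = dp(x)
out_ref, (h_ref, c_ref) = ref(x)
assert torch.allclose(out_dp, out_ref, atol=1e-5)
assert torch.allclose(h_dp, h_ref, atol=1e-5)
assert torch.allclose(c_dp, c_ref, atol=1e-5)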
madhavajay/opacus
DPSLTMAdapter
false
10494
[ "Apache-2.0" ]
0
7ae098764b4cf2388c66e263dd8d56bca0a290d0
https://github.com/madhavajay/opacus/tree/7ae098764b4cf2388c66e263dd8d56bca0a290d0
CombineSlices
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xj/cxjyiylkxhhw7puycpyh4rhisqin43l5kgucj4lyyniez2ypunvk.py # Topologically Sorted Source Nodes: [index_select], Original ATen: [aten.index_select] # Source node to ATen node mapping: # index_select => index # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, None, %full_default]), kwargs = {}) triton_poi_fused_index_select_0 = async_compile.triton('triton_poi_fused_index_select_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_select_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [index_select], Original ATen: [aten.index_select] stream0 = get_raw_stream(0) triton_poi_fused_index_select_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data import torch.utils.data.distributed import torch.optim class CombineSlices(nn.Module): def __init__(self, slice_dim=2): super().__init__() self.slice_dim = slice_dim def forward(self, x): return torch.index_select(x, dim=self.slice_dim, index=torch.tensor (0, device=x.device)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
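Selecting index 0 with ``index_select`` keeps ``slice_dim`` as a size-1 dimension (the compiled buffer in this record is allocated as ``(4, 4, 1, 4)``), so the module behaves like a width-1 slice. A small sketch, assuming only the class above:

import torch

m = CombineSlices(slice_dim=2)
x = torch.rand(4, 4, 4, 4)
out = m(x)
assert out.shape == (4, 4, 1, 4)
assert torch.equal(out, x[:, :, 0:1, :])  # width-1 slice along dim 2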
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data import torch.utils.data.distributed import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_select_0[grid(64)](arg0_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) del arg0_1 return buf0, class CombineSlicesNew(nn.Module): def __init__(self, slice_dim=2): super().__init__() self.slice_dim = slice_dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
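Note that the compiled wrapper is shape-specialized: ``assert_size_stride`` pins the input to a contiguous ``(4, 4, 4, 4)`` tensor and ``call`` runs on ``cuda:0``. A usage sketch under those assumptions:

import torch

# Requires a CUDA device; any other input shape or stride trips the guard in call().
x = torch.rand(4, 4, 4, 4, device='cuda')
y = CombineSlicesNew()(x)
assert y.shape == (4, 4, 1, 4)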
kapoor1992/fastMRI
CombineSlices
false
10495
[ "MIT" ]
0
6b0af94663faa55a2dd901a6a5cbb7d7b5f4cf6d
https://github.com/kapoor1992/fastMRI/tree/6b0af94663faa55a2dd901a6a5cbb7d7b5f4cf6d
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/4s/c4szhh35rbtilv4zlnanegnq2hofrkvv7yac3nsynw6qjxjbg3tg.py # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.clone] # Source node to ATen node mapping: # bilinear => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/3a/c3a7jkhbifqenis2g23pmqmidmnwe5he2pwgbrunksukoz44fhmm.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat] # Source node to ATen node mapping: # logits => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), 
stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf2 = buf1 del buf1 # Topologically Sorted Source Nodes: [bilinear_1], Original ATen: [aten._trilinear] buf3 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf2, primals_4, buf4, buf5, 32, grid=grid(32), stream=stream0) del buf2 del buf4 del primals_4 return (buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Discriminator(nn.Module): def __init__(self, n_h): super(Discriminator, self).__init__() self.f_k = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None): c_x = torch.unsqueeze(c, 1) c_x = c_x.expand_as(h_pl) sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2) sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2) if s_bias1 is not None: sc_1 += s_bias1 if s_bias2 is not None: sc_2 += s_bias2 logits = torch.cat((sc_1, sc_2), 1) return logits def get_inputs(): return [torch.rand([4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_h': 4}]
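The two scores are plain bilinear forms. As a sketch of what ``nn.Bilinear`` computes here (standard semantics, weight of shape ``(out_features, in1_features, in2_features)``), the positive score can be rewritten as an einsum; the names below are illustrative only:

import torch

d = Discriminator(n_h=4)
c, h_pl, _h_mi = get_inputs()
c_x = c.unsqueeze(1).expand_as(h_pl)
# out[b, t, k] = sum_ij h_pl[b, t, i] * W[k, i, j] * c_x[b, t, j] + bias[k]
ref = torch.einsum('bti,kij,btj->btk', h_pl, d.f_k.weight, c_x) + d.f_k.bias
assert torch.allclose(d.f_k(h_pl, c_x), ref, atol=1e-5)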
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_2, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor( buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf2 = buf1 del buf1 buf3 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_5, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor( buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](buf2, primals_4, buf4, buf5, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf2 del buf4 del primals_4 return buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor( primals_5, (16, 4), (4, 1), 0) class DiscriminatorNew(nn.Module): def __init__(self, n_h): super(DiscriminatorNew, self).__init__() self.f_k = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1, input_2): primals_3 = self.f_k.weight primals_4 = self.f_k.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
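A hedged end-to-end comparison (requires CUDA, since ``call`` pins ``cuda:0``): with shared weights, the fused module should match the eager ``Discriminator`` above on the sample inputs.

import torch

eager = Discriminator(n_h=4).cuda()
fused = DiscriminatorNew(n_h=4).cuda()
fused.load_state_dict(eager.state_dict())  # same parameter names: f_k.weight, f_k.bias
c, h_pl, h_mi = (t.cuda() for t in get_inputs())
assert torch.allclose(eager(c, h_pl, h_mi), fused(c, h_pl, h_mi), atol=1e-5)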
mess-clarifai/DGI
Discriminator
false
10496
[ "MIT" ]
0
3a7c96d59991d448b84d709916d1d5f256e5b9be
https://github.com/mess-clarifai/DGI/tree/3a7c96d59991d448b84d709916d1d5f256e5b9be
SphericalBesselBasis
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/mh/cmhudqgyaffvfgzwidcjby2k4225p53z7fnky2nqvz37ykelzacp.py # Topologically Sorted Source Nodes: [truediv, mul, sin, mul_1], Original ATen: [aten.reciprocal, aten.mul, aten.sin] # Source node to ATen node mapping: # mul => mul_1 # mul_1 => mul_2 # sin => sin # truediv => mul, reciprocal # Graph fragment: # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%unsqueeze,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 0.1767766952966369), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %unsqueeze), kwargs = {}) # %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sin), kwargs = {}) triton_poi_fused_mul_reciprocal_sin_0 = async_compile.triton('triton_poi_fused_mul_reciprocal_sin_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_reciprocal_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 0.1767766952966369 tmp4 = tmp2 * tmp3 tmp6 = tmp5 * tmp0 tmp7 = tl_math.sin(tmp6) tmp8 = tmp4 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv, mul, sin, mul_1], Original ATen: [aten.reciprocal, aten.mul, aten.sin] stream0 = get_raw_stream(0) triton_poi_fused_mul_reciprocal_sin_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) return (buf0, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import numpy as np class SphericalBesselBasis(torch.nn.Module): """ 1D spherical Bessel basis Parameters ---------- num_radial: int Controls maximum frequency. cutoff: float Cutoff distance in Angstrom. """ def __init__(self, num_radial: 'int', cutoff: 'float'): super().__init__() self.norm_const = math.sqrt(2 / cutoff ** 3) self.frequencies = torch.nn.Parameter(data=torch.tensor(np.pi * np. arange(1, num_radial + 1, dtype=np.float32)), requires_grad=True) def forward(self, d_scaled): return self.norm_const / d_scaled[:, None] * torch.sin(self. frequencies * d_scaled[:, None]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_radial': 4, 'cutoff': 4}]
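In closed form the basis is the zeroth spherical Bessel function j0(x) = sin(x)/x sampled at multiples of pi: b_n(d) = sqrt(2 / cutoff**3) * sin(n * pi * d) / d for n = 1..num_radial. The literal 0.1767766952966369 folded into the fused kernels in this record is exactly that normalization for cutoff=4, as a quick check confirms:

import math

# norm_const for cutoff=4 matches the constant baked into the Triton kernel
assert math.isclose(math.sqrt(2 / 4 ** 3), 0.1767766952966369)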
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 0.1767766952966369 tmp4 = tmp2 * tmp3 tmp6 = tmp5 * tmp0 tmp7 = tl_math.sin(tmp6) tmp8 = tmp4 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_sin_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf0, primals_1, primals_2 class SphericalBesselBasisNew(torch.nn.Module): """ 1D spherical Bessel basis Parameters ---------- num_radial: int Controls maximum frequency. cutoff: float Cutoff distance in Angstrom. """ def __init__(self, num_radial: 'int', cutoff: 'float'): super().__init__() self.norm_const = math.sqrt(2 / cutoff ** 3) self.frequencies = torch.nn.Parameter(data=torch.tensor(np.pi * np. arange(1, num_radial + 1, dtype=np.float32)), requires_grad=True) def forward(self, input_0): primals_2 = self.frequencies primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
krylea/ocp
SphericalBesselBasis
false
10497
[ "MIT" ]
0
00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540
https://github.com/krylea/ocp/tree/00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540